code (string, lengths 13-6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1-5)
---|---|---|---|
#!/usr/bin/env python
# Sanjaya Gajurel, Computational Scientist, Case Western Reserve University, April 2015
import vtk
# ------------------------------------------------------------------------------
# Script Entry Point
# ------------------------------------------------------------------------------
if __name__ == "__main__":
print("vtkGraph: Building a graph using Unstructured Grid & dumping it in a vtk file, vertex.vtu, to be visualized using ParaView")
pointSource = vtk.vtkPointSource()
pointSource.Update()
# Create an integer array to store vertex id data & link it with its degree value as a scalar.
degree = vtk.vtkIntArray()
degree.SetNumberOfComponents(1)
degree.SetName("degree")
degree.SetNumberOfTuples(7)
degree.SetValue(0, 2)
degree.SetValue(1, 1)
degree.SetValue(2, 3)
degree.SetValue(3, 3)
degree.SetValue(4, 4)
degree.SetValue(5, 2)
degree.SetValue(6, 1)
pointSource.GetOutput().GetPointData().AddArray(degree)
# Assign co-ordinates for vertices
Points = vtk.vtkPoints()
Points.InsertNextPoint(0, 1, 0)
Points.InsertNextPoint(0, 0, 0)
Points.InsertNextPoint(1, 1, 0)
Points.InsertNextPoint(1, 0, 0)
Points.InsertNextPoint(2, 1, 0)
Points.InsertNextPoint(2, 0, 0)
Points.InsertNextPoint(3, 0, 0)
# Establish the specified edges using CellArray
line = vtk.vtkCellArray()
line.Allocate(8)
line.InsertNextCell(2)
line.InsertCellPoint(0)
line.InsertCellPoint(1)
line.InsertNextCell(2)
line.InsertCellPoint(0)
line.InsertCellPoint(2)
line.InsertNextCell(2)
line.InsertCellPoint(2)
line.InsertCellPoint(3)
line.InsertNextCell(2)
line.InsertCellPoint(2)
line.InsertCellPoint(4)
line.InsertNextCell(2)
line.InsertCellPoint(3)
line.InsertCellPoint(4)
line.InsertNextCell(2)
line.InsertCellPoint(3)
line.InsertCellPoint(5)
line.InsertNextCell(2)
line.InsertCellPoint(4)
line.InsertCellPoint(5)
line.InsertNextCell(2)
line.InsertCellPoint(4)
line.InsertCellPoint(6)
# Add the vertices and edges to unstructured Grid
G = vtk.vtkUnstructuredGrid()
G.GetPointData().SetScalars(degree)
G.SetPoints(Points)
G.SetCells(vtk.VTK_LINE, line)
# Dump the graph in VTK unstructured format (.vtu)
gw = vtk.vtkXMLUnstructuredGridWriter()
gw.SetFileName("vertex.vtu")
gw.SetInputData(G)
gw.Write()
print('---> ')
print("Feed the vertex.vtu file in ParaView/VisIt.")
|
normal
|
{
"blob_id": "de7515cb71c8e30018b14baf8846648d0c76a592",
"index": 7461,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n print(\n 'vtkGraph: Building a graph using Unstructured Grid & dumping it in a vtk file, vertex.vtu, to be visualized using ParaView'\n )\n pointSource = vtk.vtkPointSource()\n pointSource.Update()\n degree = vtk.vtkIntArray()\n degree.SetNumberOfComponents(1)\n degree.SetName('degree')\n degree.SetNumberOfTuples(7)\n degree.SetValue(0, 2)\n degree.SetValue(1, 1)\n degree.SetValue(2, 3)\n degree.SetValue(3, 3)\n degree.SetValue(4, 4)\n degree.SetValue(5, 2)\n degree.SetValue(6, 1)\n pointSource.GetOutput().GetPointData().AddArray(degree)\n Points = vtk.vtkPoints()\n Points.InsertNextPoint(0, 1, 0)\n Points.InsertNextPoint(0, 0, 0)\n Points.InsertNextPoint(1, 1, 0)\n Points.InsertNextPoint(1, 0, 0)\n Points.InsertNextPoint(2, 1, 0)\n Points.InsertNextPoint(2, 0, 0)\n Points.InsertNextPoint(3, 0, 0)\n line = vtk.vtkCellArray()\n line.Allocate(8)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(1)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(2)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(3)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(6)\n G = vtk.vtkUnstructuredGrid()\n G.GetPointData().SetScalars(degree)\n G.SetPoints(Points)\n G.SetCells(vtk.VTK_LINE, line)\n gw = vtk.vtkXMLUnstructuredGridWriter()\n gw.SetFileName('vertex.vtu')\n gw.SetInputData(G)\n gw.Write()\n print('---> ')\n print('Feed the vertex.vtu file in ParaView/VisIt.')\n",
"step-3": "import vtk\nif __name__ == '__main__':\n print(\n 'vtkGraph: Building a graph using Unstructured Grid & dumping it in a vtk file, vertex.vtu, to be visualized using ParaView'\n )\n pointSource = vtk.vtkPointSource()\n pointSource.Update()\n degree = vtk.vtkIntArray()\n degree.SetNumberOfComponents(1)\n degree.SetName('degree')\n degree.SetNumberOfTuples(7)\n degree.SetValue(0, 2)\n degree.SetValue(1, 1)\n degree.SetValue(2, 3)\n degree.SetValue(3, 3)\n degree.SetValue(4, 4)\n degree.SetValue(5, 2)\n degree.SetValue(6, 1)\n pointSource.GetOutput().GetPointData().AddArray(degree)\n Points = vtk.vtkPoints()\n Points.InsertNextPoint(0, 1, 0)\n Points.InsertNextPoint(0, 0, 0)\n Points.InsertNextPoint(1, 1, 0)\n Points.InsertNextPoint(1, 0, 0)\n Points.InsertNextPoint(2, 1, 0)\n Points.InsertNextPoint(2, 0, 0)\n Points.InsertNextPoint(3, 0, 0)\n line = vtk.vtkCellArray()\n line.Allocate(8)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(1)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(2)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(3)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(6)\n G = vtk.vtkUnstructuredGrid()\n G.GetPointData().SetScalars(degree)\n G.SetPoints(Points)\n G.SetCells(vtk.VTK_LINE, line)\n gw = vtk.vtkXMLUnstructuredGridWriter()\n gw.SetFileName('vertex.vtu')\n gw.SetInputData(G)\n gw.Write()\n print('---> ')\n print('Feed the vertex.vtu file in ParaView/VisIt.')\n",
"step-4": "#!/usr/bin/env python\n# Sanjaya Gajurel, Computational Scientist, Case Western Reserve University, April 2015\n\nimport vtk\n\n# ------------------------------------------------------------------------------\n# Script Entry Point\n# ------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n\n print(\"vtkGraph: Building a graph using Unstructured Grid & dumping it in a vtk file, vertex.vtu, to be visualized using ParaView\")\n\n pointSource = vtk.vtkPointSource()\n pointSource.Update()\n\n # Create an integer array to store vertex id data & link it with its degree value as a scalar.\n degree = vtk.vtkIntArray()\n degree.SetNumberOfComponents(1)\n degree.SetName(\"degree\")\n degree.SetNumberOfTuples(7)\n degree.SetValue(0, 2)\n degree.SetValue(1, 1)\n degree.SetValue(2, 3)\n degree.SetValue(3, 3)\n degree.SetValue(4, 4)\n degree.SetValue(5, 2)\n degree.SetValue(6, 1)\n\n pointSource.GetOutput().GetPointData().AddArray(degree)\n\n # Assign co-ordinates for vertices\n Points = vtk.vtkPoints()\n Points.InsertNextPoint(0, 1, 0)\n Points.InsertNextPoint(0, 0, 0)\n Points.InsertNextPoint(1, 1, 0)\n Points.InsertNextPoint(1, 0, 0)\n Points.InsertNextPoint(2, 1, 0)\n Points.InsertNextPoint(2, 0, 0)\n Points.InsertNextPoint(3, 0, 0)\n\n # Establish the specified edges using CellArray\n line = vtk.vtkCellArray()\n line.Allocate(8)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(1)\n line.InsertNextCell(2)\n line.InsertCellPoint(0)\n line.InsertCellPoint(2)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(3)\n line.InsertNextCell(2)\n line.InsertCellPoint(2)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(4)\n line.InsertNextCell(2)\n line.InsertCellPoint(3)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(5)\n line.InsertNextCell(2)\n line.InsertCellPoint(4)\n line.InsertCellPoint(6)\n\n # Add the vertices and edges to unstructured Grid\n G = vtk.vtkUnstructuredGrid()\n G.GetPointData().SetScalars(degree)\n G.SetPoints(Points)\n G.SetCells(vtk.VTK_LINE, line)\n\n # Dump the graph in VTK unstructured format (.vtu)\n gw = vtk.vtkXMLUnstructuredGridWriter()\n gw.SetFileName(\"vertex.vtu\")\n gw.SetInputData(G)\n gw.Write()\n print('---> ')\n\n print(\"Feed the vertex.vtu file in ParaView/VisIt.\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
board.display_board()
<|reserved_special_token_0|>
game.take_shot("""
Choose a spot to fire at in enemy seas: """, board)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
row_num = list(string.ascii_lowercase[:10])
col_num = 10
board = Board(row_num, col_num)
board.display_board()
guesses = 25
quit = 'q'
game = Game(guesses, quit)
game.take_shot("""
Choose a spot to fire at in enemy seas: """, board)
<|reserved_special_token_1|>
from battleship.board import Board
from battleship.game import Game
import string
row_num = list(string.ascii_lowercase[:10])
col_num = 10
board = Board(row_num, col_num)
board.display_board()
guesses = 25
quit = 'q'
game = Game(guesses, quit)
game.take_shot("""
Choose a spot to fire at in enemy seas: """, board)
<|reserved_special_token_1|>
from battleship.board import Board
from battleship.game import Game
import string
# Board
row_num = list(string.ascii_lowercase[:10]) # A-J
col_num = 10
board = Board(row_num, col_num)
board.display_board()
# Game
guesses = 25
quit = 'q'
game = Game(guesses, quit)
game.take_shot("\nChoose a spot to fire at in enemy seas: ", board)
# Ships
# 2x submarine = 1
# 2x destroyer = 2
# 1x cruiser = 3
# 1x battleship = 4
# 1x carrier = 5
|
flexible
|
{
"blob_id": "dd06847c3eb9af6e84f247f8f0dd03961d83688e",
"index": 9453,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nboard.display_board()\n<mask token>\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-3": "<mask token>\nrow_num = list(string.ascii_lowercase[:10])\ncol_num = 10\nboard = Board(row_num, col_num)\nboard.display_board()\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-4": "from battleship.board import Board\nfrom battleship.game import Game\nimport string\nrow_num = list(string.ascii_lowercase[:10])\ncol_num = 10\nboard = Board(row_num, col_num)\nboard.display_board()\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\"\"\nChoose a spot to fire at in enemy seas: \"\"\", board)\n",
"step-5": "from battleship.board import Board\nfrom battleship.game import Game\n\nimport string\n\n# Board\nrow_num = list(string.ascii_lowercase[:10]) # A-J\ncol_num = 10\nboard = Board(row_num, col_num) \nboard.display_board()\n\n# Game\nguesses = 25\nquit = 'q'\ngame = Game(guesses, quit)\ngame.take_shot(\"\\nChoose a spot to fire at in enemy seas: \", board)\n\n# Ships\n# 2x submarine = 1\n# 2x destroyer = 2\n# 1x cruiser = 3\n# 1x battleship = 4\n# 1x carrier = 5\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
<|reserved_special_token_1|>
<|reserved_special_token_0|>
example_payment = BoletoPayment(line=
'34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=
'2020-02-29', description='loading a random account', tax_id=
'20.018.183/0001-80')
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
<|reserved_special_token_1|>
from copy import deepcopy
from datetime import date, timedelta
from hashlib import sha256
import starkbank
from starkbank import BoletoPayment
from .boleto import generateExampleBoletosJson
example_payment = BoletoPayment(line=
'34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=
'2020-02-29', description='loading a random account', tax_id=
'20.018.183/0001-80')
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min(date.today() + timedelta(days=1) if
next_day else date.today(), (boleto.due - timedelta(hours=3)).
date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(
)
payments.append(payment)
return payments
<|reserved_special_token_1|>
from copy import deepcopy
from datetime import date, timedelta
from hashlib import sha256
import starkbank
from starkbank import BoletoPayment
from .boleto import generateExampleBoletosJson
example_payment = BoletoPayment(
line="34191.09008 61713.957308 71444.640008 2 83430000984732",
scheduled="2020-02-29",
description="loading a random account",
tax_id="20.018.183/0001-80",
)
def generateExampleBoletoPaymentsJson(n=1, next_day=False):
boletos = generateExampleBoletosJson(n=n)
boletos = starkbank.boleto.create(boletos)
payments = []
for boleto in boletos:
payment = deepcopy(example_payment)
payment.line = boleto.line
payment.scheduled = min((date.today() + timedelta(days=1)) if next_day else date.today(), (boleto.due - timedelta(hours=3)).date())
payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest()
payments.append(payment)
return payments
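# Note on the function above: `scheduled` is clamped so a payment is never
# scheduled past the boleto's due date (shifted back 3 hours, presumably to
# Brasília time), and `description` is the SHA-256 of the boleto id, which
# gives each payment a unique, reproducible tag.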
|
flexible
|
{
"blob_id": "383d3b35fbfb7921111b28c3160173ce1c200387",
"index": 637,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef generateExampleBoletoPaymentsJson(n=1, next_day=False):\n boletos = generateExampleBoletosJson(n=n)\n boletos = starkbank.boleto.create(boletos)\n payments = []\n for boleto in boletos:\n payment = deepcopy(example_payment)\n payment.line = boleto.line\n payment.scheduled = min(date.today() + timedelta(days=1) if\n next_day else date.today(), (boleto.due - timedelta(hours=3)).\n date())\n payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(\n )\n payments.append(payment)\n return payments\n",
"step-3": "<mask token>\nexample_payment = BoletoPayment(line=\n '34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=\n '2020-02-29', description='loading a random account', tax_id=\n '20.018.183/0001-80')\n\n\ndef generateExampleBoletoPaymentsJson(n=1, next_day=False):\n boletos = generateExampleBoletosJson(n=n)\n boletos = starkbank.boleto.create(boletos)\n payments = []\n for boleto in boletos:\n payment = deepcopy(example_payment)\n payment.line = boleto.line\n payment.scheduled = min(date.today() + timedelta(days=1) if\n next_day else date.today(), (boleto.due - timedelta(hours=3)).\n date())\n payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(\n )\n payments.append(payment)\n return payments\n",
"step-4": "from copy import deepcopy\nfrom datetime import date, timedelta\nfrom hashlib import sha256\nimport starkbank\nfrom starkbank import BoletoPayment\nfrom .boleto import generateExampleBoletosJson\nexample_payment = BoletoPayment(line=\n '34191.09008 61713.957308 71444.640008 2 83430000984732', scheduled=\n '2020-02-29', description='loading a random account', tax_id=\n '20.018.183/0001-80')\n\n\ndef generateExampleBoletoPaymentsJson(n=1, next_day=False):\n boletos = generateExampleBoletosJson(n=n)\n boletos = starkbank.boleto.create(boletos)\n payments = []\n for boleto in boletos:\n payment = deepcopy(example_payment)\n payment.line = boleto.line\n payment.scheduled = min(date.today() + timedelta(days=1) if\n next_day else date.today(), (boleto.due - timedelta(hours=3)).\n date())\n payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest(\n )\n payments.append(payment)\n return payments\n",
"step-5": "from copy import deepcopy\nfrom datetime import date, timedelta\nfrom hashlib import sha256\nimport starkbank\nfrom starkbank import BoletoPayment\nfrom .boleto import generateExampleBoletosJson\n\n\nexample_payment = BoletoPayment(\n line=\"34191.09008 61713.957308 71444.640008 2 83430000984732\",\n scheduled=\"2020-02-29\",\n description=\"loading a random account\",\n tax_id=\"20.018.183/0001-80\",\n)\n\n\ndef generateExampleBoletoPaymentsJson(n=1, next_day=False):\n boletos = generateExampleBoletosJson(n=n)\n\n boletos = starkbank.boleto.create(boletos)\n\n payments = []\n for boleto in boletos:\n payment = deepcopy(example_payment)\n payment.line = boleto.line\n payment.scheduled = min((date.today() + timedelta(days=1)) if next_day else date.today(), (boleto.due - timedelta(hours=3)).date())\n payment.description = sha256(str(boleto.id).encode('utf-8')).hexdigest()\n payments.append(payment)\n return payments\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70, '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C': 0, 'S': 1, 'H': 2, 'D': 3} #Used to assign each card to a unique three-digit integer
configScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1, 4): 7} #Tracks hand scores for (respectively) high card, pair, two pair, three-of-a-kind, full house, and four-of-a-kind
scoreValues = {0: 'High Card', 1: 'Pair', 2: '2 Pair', 3: '3 of a Kind', 4: 'Straight', 5: 'Flush', 6: 'Full House', 7: '4 of a Kind', 8: 'Straight Flush'} #This data object is purely to enhance readability by demonstrating what type of hand each hand score corresponds to
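# Worked example of the card encoding: 'TH' (ten of hearts) maps to
# pokerAssignments['T'] + pokerAssignments['H'] = 100 + 2 = 102, so
# 102 // 10 recovers the rank (10) and 102 % 10 recovers the suit (2).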
def initialize(): #initializes hands_list, assigns each card in a hand to a unique three-digit integer
hands_file = open("euler54_poker.txt")
hands_string = hands_file.read()
tempList = []
newString = (hands_string.replace('\n', ' ')).replace(' ', '')
for i in range(0, len(newString), 2):
tempList.append(newString[i: i + 2])
hands_list = []
for i in range(0, len(tempList), 10): #generates list item for each hand of 10 cards
new_hand = []
for j in range(2): #generates list item for each player's cards
player_hand = []
for k in range(5):
player_hand.append(pokerAssignments[tempList[i + 5*j + k][0]] + pokerAssignments[tempList[i + 5*j + k][1]])
new_hand.append(player_hand)
hands_list.append(new_hand)
return hands_list
hands_list = initialize()
def check_flush(hand): # checks if a reverse sorted hand is a flush
suit = hand[0] % 10
for i in range(1, 5):
if hand[i] % 10 != suit:
return False
return True
def check_straight(hand): #checks if a reverse sorted hand is a straight
for i in range(1, 5):
if hand[i] // 10 != (hand[i - 1] // 10) - 1:
return False
return True
def check_copies(hand): #checks if a hand has any pairs, three of a kind, two pair, etc. and sorts it accordingly
config = []
hand.sort()
i = 0
while i < 5:
count = 1
j = 1
while i + j < 5 and (hand[i + j] // 10) == (hand[i] // 10):
count += 1
j += 1
config.append([count, hand[i] // 10])
i += j
if config != []: #sorts for comparison
config.sort()
for i in range(len(config)):
for j in range(5):
if (hand[j] // 10) == config[i][1]:
hand.insert(0, hand[j])
hand.pop(j + 1)
return hand, config[-2][0], config[-1][0]
def score_hand(hand): #returns a number 0-8 for the hand the player has and the hand properly sorted
hand.sort(reverse = True)
is_flush = check_flush(hand)
is_straight = check_straight(hand)
if is_flush and is_straight:
return hand, 8
elif is_flush:
return hand, 5
elif is_straight:
return hand, 4
else:
hand, config_one, config_two = check_copies(hand)
return hand, configScoring[config_one, config_two]
def compare(hand_one, hand_two): #returns the number of the winning player if players have same hand score (who has higher card in tiebreak?)
for i in range(5):
if hand_one[i] // 10 > hand_two[i] // 10:
return 1
elif hand_two[i] // 10 > hand_one[i] // 10:
return 2
return None
def main(hands):
p_one_wins = 0
for i in range(len(hands)):
p_one_hand, p_one_score = score_hand(hands[i][0])
p_two_hand, p_two_score = score_hand(hands[i][1])
if p_one_score > p_two_score:
p_one_wins += 1
elif p_one_score == p_two_score:
if compare(p_one_hand, p_two_hand) == 1:
p_one_wins += 1
return p_one_wins
print(main(hands_list))
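# For the official Project Euler #54 input of 1000 hands, this should print
# 376 (the number of hands Player 1 wins).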
|
normal
|
{
"blob_id": "a2a3e8d52fd467178460b178c5dbf9ccd72706e7",
"index": 8251,
"step-1": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\n<mask token>\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\nprint(main(hands_list))\n",
"step-4": "pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70,\n '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C':\n 0, 'S': 1, 'H': 2, 'D': 3}\nconfigScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1,\n 4): 7}\nscoreValues = {(0): 'High Card', (1): 'Pair', (2): '2 Pair', (3):\n '3 of a Kind', (4): 'Straight', (5): 'Flush', (6): 'Full House', (7):\n '4 of a Kind', (8): 'Straight Flush'}\n\n\ndef initialize():\n hands_file = open('euler54_poker.txt')\n hands_string = hands_file.read()\n tempList = []\n newString = hands_string.replace('\\n', ' ').replace(' ', '')\n for i in range(0, len(newString), 2):\n tempList.append(newString[i:i + 2])\n hands_list = []\n for i in range(0, len(tempList), 10):\n new_hand = []\n for j in range(2):\n player_hand = []\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5 * j + k]\n [0]] + pokerAssignments[tempList[i + 5 * j + k][1]])\n new_hand.append(player_hand)\n hands_list.append(new_hand)\n return hands_list\n\n\nhands_list = initialize()\n\n\ndef check_flush(hand):\n suit = hand[0] % 10\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n return True\n\n\ndef check_straight(hand):\n for i in range(1, 5):\n if hand[i] // 10 != hand[i - 1] // 10 - 1:\n return False\n return True\n\n\ndef check_copies(hand):\n config = []\n hand.sort()\n i = 0\n while i < 5:\n count = 1\n j = 1\n while i + j < 5 and hand[i + j] // 10 == hand[i] // 10:\n count += 1\n j += 1\n config.append([count, hand[i] // 10])\n i += j\n if config != []:\n config.sort()\n for i in range(len(config)):\n for j in range(5):\n if hand[j] // 10 == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n return hand, config[-2][0], config[-1][0]\n\n\ndef score_hand(hand):\n hand.sort(reverse=True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n if is_flush and is_straight:\n return hand, 8\n elif is_flush:\n return hand, 5\n elif is_straight:\n return hand, 4\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\n\ndef compare(hand_one, hand_two):\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n return None\n\n\ndef main(hands):\n p_one_wins = 0\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n if p_one_score > p_two_score:\n p_one_wins += 1\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n return p_one_wins\n\n\nprint(main(hands_list))\n",
"step-5": "pokerAssignments = {'2': 20, '3': 30, '4': 40, '5': 50, '6': 60, '7': 70, '8': 80, '9': 90, 'T': 100, 'J': 110, 'Q': 120, 'K': 130, 'A': 140, 'C': 0, 'S': 1, 'H': 2, 'D': 3} #Used to assign each card to a unique three-digit integer\n\nconfigScoring = {(1, 1): 0, (1, 2): 1, (2, 2): 2, (1, 3): 3, (2, 3): 6, (1, 4): 7} #Tracks hand scores for (respectively) high card, pair, two pair, three-of-a-kind, full house, and four-of-a-kind\n\nscoreValues = {0: 'High Card', 1: 'Pair', 2: '2 Pair', 3: '3 of a Kind', 4: 'Straight', 5: 'Flush', 6: 'Full House', 7: '4 of a Kind', 8: 'Straight Flush'} #This data object is purely to enhance readability by demonstrating what type of hand each hand score corresponds to\n\ndef initialize(): #initalizes hands_list, assigns each card in a hand to a unique three-digit integer\n hands_file = open(\"euler54_poker.txt\")\n hands_string = hands_file.read()\n tempList = []\n newString = (hands_string.replace('\\n', ' ')).replace(' ', '')\n\n for i in range(0, len(newString), 2):\n tempList.append(newString[i: i + 2])\n\n hands_list = []\n\n for i in range(0, len(tempList), 10): #generates list item for each hand of 10 cards\n new_hand = []\n\n for j in range(2): #generates list item for each player's cards\n player_hand = []\n\n for k in range(5):\n player_hand.append(pokerAssignments[tempList[i + 5*j + k][0]] + pokerAssignments[tempList[i + 5*j + k][1]])\n\n new_hand.append(player_hand)\n\n hands_list.append(new_hand)\n\n return hands_list\n\nhands_list = initialize()\n\ndef check_flush(hand): # checks if a reverse sorted hand is a flush\n suit = hand[0] % 10\n\n for i in range(1, 5):\n if hand[i] % 10 != suit:\n return False\n\n return True\n\ndef check_straight(hand): #checks if a reverse sorted hand is a straight\n\n for i in range(1, 5):\n\n if hand[i] // 10 != (hand[i - 1] // 10) - 1:\n return False\n\n return True\n\ndef check_copies(hand): #checks if a hand has any pairs, three of a kind, two pair, etc. 
and sorts it accordingly\n config = []\n hand.sort()\n\n i = 0\n while i < 5:\n count = 1\n j = 1\n\n while i + j < 5 and (hand[i + j] // 10) == (hand[i] // 10):\n count += 1\n j += 1\n\n config.append([count, hand[i] // 10])\n i += j\n\n if config != []: #sorts for comparison\n config.sort()\n\n for i in range(len(config)):\n\n for j in range(5):\n\n if (hand[j] // 10) == config[i][1]:\n hand.insert(0, hand[j])\n hand.pop(j + 1)\n\n return hand, config[-2][0], config[-1][0]\n\ndef score_hand(hand): #returns a number 0-8 for the hand the player has and the hand properly sorted\n hand.sort(reverse = True)\n is_flush = check_flush(hand)\n is_straight = check_straight(hand)\n\n if is_flush and is_straight:\n return hand, 8\n\n elif is_flush:\n return hand, 5\n\n elif is_straight:\n return hand, 4\n\n else:\n hand, config_one, config_two = check_copies(hand)\n return hand, configScoring[config_one, config_two]\n\ndef compare(hand_one, hand_two): #returns the number of the winning player if players have same hand score (who has higher card in tiebreak?)\n\n for i in range(5):\n if hand_one[i] // 10 > hand_two[i] // 10:\n return 1\n\n elif hand_two[i] // 10 > hand_one[i] // 10:\n return 2\n\n return None\n\ndef main(hands):\n p_one_wins = 0\n\n for i in range(len(hands)):\n p_one_hand, p_one_score = score_hand(hands[i][0])\n p_two_hand, p_two_score = score_hand(hands[i][1])\n\n if p_one_score > p_two_score:\n p_one_wins += 1\n\n elif p_one_score == p_two_score:\n if compare(p_one_hand, p_two_hand) == 1:\n p_one_wins += 1\n\n return p_one_wins\n\nprint(main(hands_list))\n",
"step-ids": [
6,
7,
8,
9,
10
]
}
|
[
6,
7,
8,
9,
10
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def solution(n):
arr = [[(0) for _ in range(i + 1)] for i in range(n)]
size = n
num = 0
x = 0
y = -1
while True:
for _ in range(size):
num += 1
y += 1
arr[y][x] = num
size -= 1
if size == 0:
break
for _ in range(size):
num += 1
x += 1
arr[y][x] = num
size -= 1
if size == 0:
break
for _ in range(size):
num += 1
x -= 1
y -= 1
arr[y][x] = num
size -= 1
if size == 0:
break
answer = []
for i in arr:
answer.extend(i)
return answer
<|reserved_special_token_1|>
# https://daphne-dev.github.io/2020/09/24/algo-022/
def solution(n):
arr = [[0 for _ in range(i+1)] for i in range(n)]
    # There are three cases:
    # 1. numbers increase while y increases (moving down the left edge)
    # 2. numbers increase while x increases (moving right along the bottom)
    # 3. numbers increase while both y and x decrease (moving up the diagonal)
size = n
num = 0
x = 0
y = -1
while True:
        # Case 1
for _ in range(size):
num += 1
y += 1
arr[y][x] = num
size-=1
if size == 0:
break
        # Case 2
for _ in range(size):
num += 1
x += 1
arr[y][x] = num
size-=1
if size == 0:
break
        # Case 3
for _ in range(size):
num += 1
x -= 1
y -= 1
arr[y][x] = num
size-=1
if size == 0:
break
answer = []
for i in arr:
answer.extend(i)
return answer
# print(solution(4))
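# Expected result for n = 4: the triangle fills as
# [1], [2, 9], [3, 10, 8], [4, 5, 6, 7],
# so solution(4) == [1, 2, 9, 3, 10, 8, 4, 5, 6, 7]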
|
flexible
|
{
"blob_id": "3c029adb59cd6db1e3d4a22e6561f5e2ae827d60",
"index": 2465,
"step-1": "<mask token>\n",
"step-2": "def solution(n):\n arr = [[(0) for _ in range(i + 1)] for i in range(n)]\n size = n\n num = 0\n x = 0\n y = -1\n while True:\n for _ in range(size):\n num += 1\n y += 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n for _ in range(size):\n num += 1\n x += 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n for _ in range(size):\n num += 1\n x -= 1\n y -= 1\n arr[y][x] = num\n size -= 1\n if size == 0:\n break\n answer = []\n for i in arr:\n answer.extend(i)\n return answer\n",
"step-3": "# https://daphne-dev.github.io/2020/09/24/algo-022/\ndef solution(n):\n arr = [[0 for _ in range(i+1)] for i in range(n)]\n # 경우의수 는 3가지\n # 1. y축이 증가하면서 수가 증가\n # 2. x축이 증가하면서 수가 증가\n # 3. y,x축이 감소하면서 수가 증가\n size = n\n num = 0\n x = 0\n y = -1\n while True:\n # 1번\n for _ in range(size):\n num += 1\n y += 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n # 2번\n for _ in range(size):\n num += 1\n x += 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n # 3번\n for _ in range(size):\n num += 1\n x -= 1\n y -= 1\n arr[y][x] = num\n size-=1\n if size == 0:\n break\n answer = []\n for i in arr:\n answer.extend(i)\n return answer\n# print(solution(4))",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
count = int(input())
for i in range(1, count + 1):
something = '='
num1, num2 = map(int, input().split())
if num1 > num2:
something = '>'
elif num1 < num2:
something = '<'
print(f'#{i} {something}')
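# Sample run: for the input "2\n5 3\n4 4" this prints
#   #1 >
#   #2 =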
|
normal
|
{
"blob_id": "abcefa0a3312e158517ec8a15421d1d07220da6a",
"index": 5271,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, count + 1):\n something = '='\n num1, num2 = map(int, input().split())\n if num1 > num2:\n something = '>'\n elif num1 < num2:\n something = '<'\n print(f'#{i} {something}')\n",
"step-3": "count = int(input())\nfor i in range(1, count + 1):\n something = '='\n num1, num2 = map(int, input().split())\n if num1 > num2:\n something = '>'\n elif num1 < num2:\n something = '<'\n print(f'#{i} {something}')\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from collections import Counter
import numpy as np
import random
import torch
import BidModel
from douzero.env.game import GameEnv
env_version = "3.2"
env_url = "http://od.vcccz.com/hechuan/env.py"
Card2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,
11: 8, 12: 9, 13: 10, 14: 11, 17: 12}
NumOnes2Array = {0: np.array([0, 0, 0, 0]),
1: np.array([1, 0, 0, 0]),
2: np.array([1, 1, 0, 0]),
3: np.array([1, 1, 1, 0]),
4: np.array([1, 1, 1, 1])}
deck = []
for i in range(3, 15):
deck.extend([i for _ in range(4)])
deck.extend([17 for _ in range(4)])
deck.extend([20, 30])
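# deck now holds all 54 cards: ranks 3-14 plus 17 (the card "2") four times
# each, and 20/30 for the black and red jokers.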
class Env:
"""
Doudizhu multi-agent wrapper
"""
def __init__(self, objective):
"""
        Objective is wp/adp/logadp. It indicates whether bombs are
        considered in the reward calculation. Here, we use dummy agents.
        This is because, in the original game, the players
are `in` the game. Here, we want to isolate
players and environments to have a more gym style
interface. To achieve this, we use dummy players
to play. For each move, we tell the corresponding
dummy player which action to play, then the player
will perform the actual action in the game engine.
"""
self.objective = objective
# Initialize players
# We use three dummy player for the target position
self.players = {}
for position in ['landlord', 'landlord_up', 'landlord_down']:
self.players[position] = DummyAgent(position)
# Initialize the internal environment
self._env = GameEnv(self.players)
self.total_round = 0
self.force_bid = 0
self.infoset = None
def reset(self, model, device, flags=None):
"""
Every time reset is called, the environment
will be re-initialized with a new deck of cards.
This function is usually called when a game is over.
"""
self._env.reset()
# Randomly shuffle the deck
if model is None:
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = {'landlord': _deck[:20],
'landlord_up': _deck[20:37],
'landlord_down': _deck[37:54],
'three_landlord_cards': _deck[17:20],
}
for key in card_play_data:
card_play_data[key].sort()
self._env.card_play_init(card_play_data)
self.infoset = self._game_infoset
return get_obs(self.infoset)
else:
self.total_round += 1
bid_done = False
card_play_data = []
landlord_cards = []
last_bid = 0
bid_count = 0
player_ids = {}
bid_info = None
bid_obs_buffer = []
multiply_obs_buffer = []
bid_limit = 3
force_bid = False
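            # Re-deal until at least one player bids; after three failed
            # deals (bid_limit exhausted) the loop falls back to BidModel's
            # win-rate estimate to force a bid on a strong enough hand.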
while not bid_done:
bid_limit -= 1
bid_obs_buffer.clear()
multiply_obs_buffer.clear()
_deck = deck.copy()
np.random.shuffle(_deck)
card_play_data = [
_deck[:17],
_deck[17:34],
_deck[34:51],
]
for i in range(3):
card_play_data[i].sort()
landlord_cards = _deck[51:54]
landlord_cards.sort()
bid_info = np.array([[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1],
[-1, -1, -1]])
bidding_player = random.randint(0, 2)
# bidding_player = 0 # debug
first_bid = -1
last_bid = -1
bid_count = 0
if bid_limit <= 0:
force_bid = True
for r in range(3):
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
if bid_limit <= 0:
wr = BidModel.predict_env(card_play_data[bidding_player])
if wr >= 0.7:
action = {"action": 1} # debug
bid_limit += 1
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
if first_bid == -1:
first_bid = bidding_player
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
else:
bid_info[r] = [0, 0, 0]
bidding_player = (bidding_player + 1) % 3
one_count = np.count_nonzero(bid_info == 1)
if one_count == 0:
continue
elif one_count > 1:
r = 3
bidding_player = first_bid
bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])
with torch.no_grad():
action = model.forward("bidding", torch.tensor(bidding_obs["z_batch"], device=device),
torch.tensor(bidding_obs["x_batch"], device=device), flags=flags)
bid_obs_buffer.append({
"x_batch": bidding_obs["x_batch"][action["action"]],
"z_batch": bidding_obs["z_batch"][action["action"]],
"pid": bidding_player
})
if action["action"] == 1:
last_bid = bidding_player
bid_count += 1
for p in range(3):
if p == bidding_player:
bid_info[r][p] = 1
else:
bid_info[r][p] = 0
break
card_play_data[last_bid].extend(landlord_cards)
card_play_data = {'landlord': card_play_data[last_bid],
'landlord_up': card_play_data[(last_bid - 1) % 3],
'landlord_down': card_play_data[(last_bid + 1) % 3],
'three_landlord_cards': landlord_cards,
}
card_play_data["landlord"].sort()
player_ids = {
'landlord': last_bid,
'landlord_up': (last_bid - 1) % 3,
'landlord_down': (last_bid + 1) % 3,
}
player_positions = {
last_bid: 'landlord',
(last_bid - 1) % 3: 'landlord_up',
(last_bid + 1) % 3: 'landlord_down'
}
for bid_obs in bid_obs_buffer:
bid_obs.update({"position": player_positions[bid_obs["pid"]]})
# Initialize the cards
self._env.card_play_init(card_play_data)
multiply_map = [
np.array([1, 0, 0]),
np.array([0, 1, 0]),
np.array([0, 0, 1])
]
for pos in ["landlord", "landlord_up", "landlord_down"]:
pid = player_ids[pos]
self._env.info_sets[pos].player_id = pid
self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]
self._env.bid_count = bid_count
# multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],
# landlord_cards)
# action = model.forward(pos, torch.tensor(multiply_obs["z_batch"], device=device),
# torch.tensor(multiply_obs["x_batch"], device=device), flags=flags)
# multiply_obs_buffer.append({
# "x_batch": multiply_obs["x_batch"][action["action"]],
# "z_batch": multiply_obs["z_batch"][action["action"]],
# "position": pos
# })
action = {"action": 0}
self._env.info_sets[pos].multiply_info = multiply_map[action["action"]]
self._env.multiply_count[pos] = action["action"]
self.infoset = self._game_infoset
if force_bid:
self.force_bid += 1
if self.total_round % 100 == 0:
print("发牌情况: %i/%i %.1f%%" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))
self.force_bid = 0
self.total_round = 0
return get_obs(self.infoset), {
"bid_obs_buffer": bid_obs_buffer,
"multiply_obs_buffer": multiply_obs_buffer
}
def step(self, action):
"""
Step function takes as input the action, which
        is a list of integers, and outputs the next observation,
reward, and a Boolean variable indicating whether the
current game is finished. It also returns an empty
dictionary that is reserved to pass useful information.
"""
assert action in self.infoset.legal_actions
self.players[self._acting_player_position].set_action(action)
self._env.step()
self.infoset = self._game_infoset
done = False
reward = 0.0
if self._game_over:
done = True
reward = {
"play": {
"landlord": self._get_reward("landlord"),
"landlord_up": self._get_reward("landlord_up"),
"landlord_down": self._get_reward("landlord_down")
},
"bid": {
"landlord": self._get_reward_bidding("landlord")*2,
"landlord_up": self._get_reward_bidding("landlord_up"),
"landlord_down": self._get_reward_bidding("landlord_down")
}
}
obs = None
else:
obs = get_obs(self.infoset)
return obs, reward, done, {}
def _get_reward(self, pos):
"""
        This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
self_bomb_num = self._env.pos_bomb_num[pos]
if winner == 'landlord':
if self.objective == 'adp':
return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (1.0 - self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return 1.0 - self._env.step_count * 0.0033
else:
if self.objective == 'adp':
return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +self._env.multiply_count[pos]) /8
elif self.objective == 'logadp':
return (-1.0 + self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4
else:
return -1.0 + self._env.step_count * 0.0033
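    # Worked example for the 'adp' objective above: a landlord win after 30
    # steps with one bomb and no multiply scores
    # (1.1 - 30 * 0.0033) * 1.3 ** 1 / 8 ≈ 0.163.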
def _get_reward_bidding(self, pos):
"""
        This function is called at the end of each
game. It returns either 1/-1 for win/loss,
or ADP, i.e., every bomb will double the score.
"""
winner = self._game_winner
bomb_num = self._game_bomb_num
if winner == 'landlord':
return 1.0 * 2**(self._env.bid_count-1) / 8
else:
return -1.0 * 2**(self._env.bid_count-1) / 8
@property
def _game_infoset(self):
"""
        Here, infoset is defined as all the information
        in the current situation, including the hand cards
        of all the players, all the historical moves, etc.
        That is, it contains perfect information. Later,
we will use functions to extract the observable
information from the views of the three players.
"""
return self._env.game_infoset
@property
def _game_bomb_num(self):
"""
The number of bombs played so far. This is used as
a feature of the neural network and is also used to
calculate ADP.
"""
return self._env.get_bomb_num()
@property
def _game_winner(self):
""" A string of landlord/peasants
"""
return self._env.get_winner()
@property
def _acting_player_position(self):
"""
The player that is active. It can be landlord,
        landlord_down, or landlord_up.
"""
return self._env.acting_player_position
@property
def _game_over(self):
""" Returns a Boolean
"""
return self._env.game_over
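# A minimal driver sketch (an assumption for illustration: reset() is called
# without a bidding model, i.e. model=None, so it returns only the obs dict):
#
#   env = Env(objective="adp")
#   obs = env.reset(None, None)
#   done = False
#   while not done:
#       action = obs["legal_actions"][0]  # e.g. always the first legal move
#       obs, reward, done, _ = env.step(action)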
class DummyAgent(object):
"""
Dummy agent is designed to easily interact with the
game engine. The agent will first be told what action
to perform. Then the environment will call this agent
to perform the actual action. This can help us to
    isolate environment and agents towards a gym-like
interface.
"""
def __init__(self, position):
self.position = position
self.action = None
def act(self, infoset):
"""
Simply return the action that is set previously.
"""
assert self.action in infoset.legal_actions
return self.action
def set_action(self, action):
"""
The environment uses this function to tell
the dummy agent what to do.
"""
self.action = action
def get_obs(infoset, use_general=True):
"""
This function obtains observations with imperfect information
from the infoset. It has three branches since we encode
different features for different positions.
    This function will return a dictionary named `obs`. It contains
several fields. These fields will be used to train the model.
One can play with those features to improve the performance.
`position` is a string that can be landlord/landlord_down/landlord_up
    `x_batch` is a batch of features (excluding the historical moves).
    It also encodes the action features.
    `z_batch` is a batch of features with historical moves only.
    `legal_actions` is the legal moves
    `x_no_action`: the features (excluding the historical moves and
    the action features). It does not have the batch dim.
`z`: same as z_batch but not a batch.
"""
if use_general:
if infoset.player_position not in ["landlord", "landlord_up", "landlord_down"]:
raise ValueError('')
return _get_obs_general(infoset, infoset.player_position)
else:
if infoset.player_position == 'landlord':
return _get_obs_landlord(infoset)
elif infoset.player_position == 'landlord_up':
return _get_obs_landlord_up(infoset)
elif infoset.player_position == 'landlord_down':
return _get_obs_landlord_down(infoset)
else:
raise ValueError('')
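# In the returned dict, row j of x_batch/z_batch pairs the current state with
# legal action j, so a value network can score every legal move in a single
# batched forward pass.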
def _get_one_hot_array(num_left_cards, max_num_cards):
"""
    A utility function to obtain a one-hot encoding
"""
one_hot = np.zeros(max_num_cards)
if num_left_cards > 0:
one_hot[num_left_cards - 1] = 1
return one_hot
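# e.g. _get_one_hot_array(3, 17) yields a 17-dim vector with a 1 at index 2,
# encoding "3 cards left" in a 17-card hand.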
def _cards2array(list_cards):
"""
    A utility function that transforms an action, i.e.,
    a list of integers, into a card matrix. Here we remove
    the six entries that are always zero and flatten
    the representations.
"""
if len(list_cards) == 0:
return np.zeros(54, dtype=np.int8)
matrix = np.zeros([4, 13], dtype=np.int8)
jokers = np.zeros(2, dtype=np.int8)
counter = Counter(list_cards)
for card, num_times in counter.items():
if card < 20:
matrix[:, Card2Column[card]] = NumOnes2Array[num_times]
elif card == 20:
jokers[0] = 1
elif card == 30:
jokers[1] = 1
return np.concatenate((matrix.flatten('F'), jokers))
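# Worked example: _cards2array([3, 3, 20]) sets column 0 to [1, 1, 0, 0] for
# the pair of 3s and jokers[0] = 1, so the flattened 54-dim vector has ones
# at indices 0, 1 (the pair) and 52 (the black joker).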
# def _action_seq_list2array(action_seq_list):
# """
# A utility function to encode the historical moves.
# We encode the historical 15 actions. If there is
# no 15 actions, we pad the features with 0. Since
# three moves is a round in DouDizhu, we concatenate
# the representations for each consecutive three moves.
# Finally, we obtain a 5x162 matrix, which will be fed
# into LSTM for encoding.
# """
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# action_seq_array[row, :] = _cards2array(list_cards)
# # action_seq_array = action_seq_array.reshape(5, 162)
# return action_seq_array
def _action_seq_list2array(action_seq_list, new_model=True):
"""
A utility function to encode the historical moves.
    We encode the last 15 actions. If there are fewer than
    15 actions, we pad the features with 0. Since
    three moves is a round in DouDizhu, the old-model path
    concatenates the representations of each consecutive three
    moves into a 5x162 matrix for the LSTM; the new-model path
    keeps the flat 15x54 layout.
"""
if new_model:
position_map = {"landlord": 0, "landlord_up": 1, "landlord_down": 2}
action_seq_array = np.ones((len(action_seq_list), 54)) * -1 # Default Value -1 for not using area
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :54] = _cards2array(list_cards[1])
else:
action_seq_array = np.zeros((len(action_seq_list), 54))
for row, list_cards in enumerate(action_seq_list):
if list_cards != []:
action_seq_array[row, :] = _cards2array(list_cards[1])
action_seq_array = action_seq_array.reshape(5, 162)
return action_seq_array
# action_seq_array = np.zeros((len(action_seq_list), 54))
# for row, list_cards in enumerate(action_seq_list):
# if list_cards != []:
# action_seq_array[row, :] = _cards2array(list_cards[1])
# return action_seq_array
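# Note on the shapes above: 15 move vectors of length 54 total 810 numbers,
# which the old-model path reshapes to (5, 162) so each LSTM timestep sees
# one full round of three consecutive moves.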
def _process_action_seq(sequence, length=15, new_model=True):
"""
A utility function encoding historical moves. We
encode 15 moves. If there is no 15 moves, we pad
with zeros.
"""
sequence = sequence[-length:].copy()
if new_model:
sequence = sequence[::-1]
if len(sequence) < length:
empty_sequence = [[] for _ in range(length - len(sequence))]
empty_sequence.extend(sequence)
sequence = empty_sequence
return sequence
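# e.g. with length=15 and only 4 recorded moves, 11 empty placeholder moves
# are prepended (the sequence is reversed first when new_model=True).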
def _get_one_hot_bomb(bomb_num):
"""
A utility function to encode the number of bombs
into one-hot representation.
"""
one_hot = np.zeros(15)
one_hot[bomb_num] = 1
return one_hot
def _get_obs_landlord(infoset):
"""
    Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_landlord_up(infoset):
"""
    Obtain the landlord_up features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_down'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_up',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
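# Sketch: eight 54-dim card vectors, a 20-dim and a 17-dim count one-hot and
# the 15-dim bomb one-hot give 8 * 54 + 20 + 17 + 15 = 484 columns for the
# landlord_up encoder, matching Table 5 of the paper.
#
#     obs = _get_obs_landlord_up(infoset)
#     assert obs['x_batch'].shape == (len(infoset.legal_actions), 484)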
def _get_obs_landlord_down(infoset):
"""
    Obtain the landlord_down features. See Table 5 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
last_landlord_action = _cards2array(
infoset.last_move_dict['landlord'])
last_landlord_action_batch = np.repeat(
last_landlord_action[np.newaxis, :],
num_legal_actions, axis=0)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_teammate_action = _cards2array(
infoset.last_move_dict['landlord_up'])
last_teammate_action_batch = np.repeat(
last_teammate_action[np.newaxis, :],
num_legal_actions, axis=0)
teammate_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
teammate_num_cards_left_batch = np.repeat(
teammate_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
teammate_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
teammate_played_cards_batch = np.repeat(
teammate_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
landlord_played_cards_batch,
teammate_played_cards_batch,
last_action_batch,
last_landlord_action_batch,
last_teammate_action_batch,
landlord_num_cards_left_batch,
teammate_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
landlord_played_cards,
teammate_played_cards,
last_action,
last_landlord_action,
last_teammate_action,
landlord_num_cards_left,
teammate_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord_down',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
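# _get_obs_landlord_down mirrors _get_obs_landlord_up (also 484 columns);
# only the teammate role flips from landlord_down to landlord_up.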
def _get_obs_landlord_withbid(infoset):
"""
    Obtain the landlord features. See Table 4 in
https://arxiv.org/pdf/2106.06135.pdf
"""
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((my_handcards_batch,
other_handcards_batch,
last_action_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
my_action_batch))
x_no_action = np.hstack((my_handcards,
other_handcards,
last_action,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 15, False), False)
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': 'landlord',
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
def _get_obs_general1(infoset, position):
    """
    Obtain the position-aware features with a flat x encoding
    (widths annotated inline below). `position` is one of
    landlord/landlord_up/landlord_down.
    """
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
    other_handcards_left_list = []
    for pos in ["landlord", "landlord_up", "landlord_down"]:
        if pos != position:
            other_handcards_left_list.extend(infoset.all_handcards[pos])
    # (collected for completeness; not folded into the features below)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch, # 3
my_handcards_batch, # 54
other_handcards_batch, # 54
three_landlord_cards_batch, # 54
last_action_batch, # 54
landlord_played_cards_batch, # 54
landlord_up_played_cards_batch, # 54
landlord_down_played_cards_batch, # 54
landlord_num_cards_left_batch, # 20
landlord_up_num_cards_left_batch, # 17
landlord_down_num_cards_left_batch, # 17
bomb_num_batch, # 15
bid_info_batch, # 12
multiply_info_batch, # 3
my_action_batch)) # 54
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num,
bid_info,
multiply_info))
z = _action_seq_list2array(_process_action_seq(
infoset.card_play_action_seq, 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
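# Sketch: summing the widths annotated in the hstack above, the general1 flat
# features come to 3 + 7 * 54 + 20 + 17 + 17 + 15 + 12 + 3 + 54 = 519 columns.
#
#     obs = _get_obs_general1(infoset, 'landlord')
#     assert obs['x_batch'].shape == (len(infoset.legal_actions), 519)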
def _get_obs_general(infoset, position):
    """
    Obtain the position-aware features with the card-plane encoding:
    most game state is stacked into the two-dimensional z input and
    only bid/multiply information remains in the flat x.
    """
num_legal_actions = len(infoset.legal_actions)
my_handcards = _cards2array(infoset.player_hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_handcards = _cards2array(infoset.other_hand_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(infoset.bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array(infoset.multiply_info)
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array(infoset.three_landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array(infoset.last_move)
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j, action in enumerate(infoset.legal_actions):
my_action_batch[j, :] = _cards2array(action)
landlord_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord'], 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_up'], 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(
infoset.num_cards_left_dict['landlord_down'], 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
    other_handcards_left_list = []
    for pos in ["landlord", "landlord_up", "landlord_down"]:
        if pos != position:
            other_handcards_left_list.extend(infoset.all_handcards[pos])
    # (collected for completeness; not folded into the features below)
landlord_played_cards = _cards2array(
infoset.played_cards['landlord'])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array(
infoset.played_cards['landlord_up'])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array(
infoset.played_cards['landlord_down'])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(
infoset.bomb_num)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
num_cards_left = np.hstack((
landlord_num_cards_left, # 20
landlord_up_num_cards_left, # 17
                                landlord_down_num_cards_left))  # 17
x_batch = np.hstack((
bid_info_batch, # 12
multiply_info_batch)) # 3
x_no_action = np.hstack((
bid_info,
multiply_info))
    z = np.vstack((
num_cards_left,
my_handcards, # 54
other_handcards, # 54
three_landlord_cards, # 54
landlord_played_cards, # 54
landlord_up_played_cards, # 54
landlord_down_played_cards, # 54
_action_seq_list2array(_process_action_seq(infoset.card_play_action_seq, 32))
))
_z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
    my_action_batch = my_action_batch[:, np.newaxis, :]
    z_batch = np.zeros([len(_z_batch), 40, 54], int)
    for i in range(len(_z_batch)):
        z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))
obs = {
'position': position,
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': infoset.legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
}
return obs
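# Sketch of the z planes built above: 7 state rows (card counts, hands, three
# landlord cards, played cards) plus the action history; the fixed 40-row
# allocation implies the history part is 32 rows of 54, so the un-batched 'z'
# has 39 rows and z_batch has shape (num_legal_actions, 40, 54) once the
# per-action row is prepended.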
def gen_bid_legal_actions(player_id, bid_info):
    """
    Generate the legal bidding actions (pass and bid) for the current
    round. Each action is a flattened 4x3 bid matrix whose columns are
    rotated so the acting player occupies the middle column.
    """
self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id + 1) % 3]]
curr_round = -1
for r in range(4):
if -1 in self_bid_info[r]:
curr_round = r
break
bid_actions = []
if curr_round != -1:
self_bid_info[curr_round] = [0, 0, 0]
bid_actions.append(np.array(self_bid_info).flatten())
self_bid_info[curr_round] = [0, 1, 0]
bid_actions.append(np.array(self_bid_info).flatten())
return np.array(bid_actions)
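# Example (sketch): at the start of bidding bid_info is all -1, so both legal
# actions fill round 0 of the player-rotated matrix.
#
#     bid_info = -np.ones((4, 3), dtype=int)
#     actions = gen_bid_legal_actions(player_id=0, bid_info=bid_info)
#     # actions[0][:3] == [0, 0, 0]  (pass)
#     # actions[1][:3] == [0, 1, 0]  (bid); later rounds stay -1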
def _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):
    """
    Obtain bidding-phase features in the full 519-column general
    layout, with every play-phase field zeroed out. Kept for
    compatibility with the older bidding network.
    """
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_info = np.array([0, 0, 0])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = bid_legal_actions
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],
num_legal_actions, axis=0)
three_landlord_cards = _cards2array([])
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(2):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8),
"multiply_info": multiply_info.astype(np.int8)
}
return obs
def _get_obs_for_bid(player_id, bid_info, hand_cards):
    """
    Obtain the compact bidding-phase features: the player's hand cards
    plus the tiled bid information, one row per legal action.
    """
    # Full deck listing kept for parity with the legacy encoder; it is
    # not used by the compact features below.
    all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
                 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
                 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 2
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)
bid_info = bid_legal_actions[0]
bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])
x_batch = np.hstack((my_handcards_batch,
bid_info_batch))
    x_no_action = my_handcards  # single array; the previous np.hstack was a no-op
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
        'z_batch': np.array([0, 0]),  # placeholder; the bidding head presumably ignores z
'legal_actions': bid_legal_actions,
'x_no_action': x_no_action.astype(np.int8),
"bid_info_batch": bid_info_batch.astype(np.int8)
}
return obs
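# Sketch: the compact bid features are 54 (hand) + 5 * 12 (tiled bid info)
# = 114 columns, one row per legal action.
#
#     obs = _get_obs_for_bid(player_id, bid_info, hand_cards)
#     assert obs['x_batch'].shape == (2, 114)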
def _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):
    """
    Obtain multiply-phase features in the full 519-column general
    layout; the three legal actions are the rows of the one-hot
    multiply_info_batch.
    """
all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,
12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]
num_legal_actions = 3
my_handcards = _cards2array(hand_cards)
my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],
num_legal_actions, axis=0)
other_cards = []
other_cards.extend(all_cards)
for card in hand_cards:
other_cards.remove(card)
other_handcards = _cards2array(other_cards)
other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],
num_legal_actions, axis=0)
position_map = {
"landlord": [1, 0, 0],
"landlord_up": [0, 1, 0],
"landlord_down": [0, 0, 1]
}
position_info = np.array(position_map[position])
position_info_batch = np.repeat(position_info[np.newaxis, :],
num_legal_actions, axis=0)
bid_info = np.array(bid_info).flatten()
bid_info_batch = np.repeat(bid_info[np.newaxis, :],
num_legal_actions, axis=0)
multiply_info = np.array([0, 0, 0])
multiply_info_batch = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
three_landlord_cards = _cards2array(landlord_cards)
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],
num_legal_actions, axis=0)
last_action = _cards2array([])
last_action_batch = np.repeat(last_action[np.newaxis, :],
num_legal_actions, axis=0)
my_action_batch = np.zeros(my_handcards_batch.shape)
for j in range(num_legal_actions):
my_action_batch[j, :] = _cards2array([])
landlord_num_cards_left = _get_one_hot_array(0, 20)
landlord_num_cards_left_batch = np.repeat(
landlord_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_num_cards_left = _get_one_hot_array(0, 17)
landlord_up_num_cards_left_batch = np.repeat(
landlord_up_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_num_cards_left = _get_one_hot_array(0, 17)
landlord_down_num_cards_left_batch = np.repeat(
landlord_down_num_cards_left[np.newaxis, :],
num_legal_actions, axis=0)
landlord_played_cards = _cards2array([])
landlord_played_cards_batch = np.repeat(
landlord_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_up_played_cards = _cards2array([])
landlord_up_played_cards_batch = np.repeat(
landlord_up_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
landlord_down_played_cards = _cards2array([])
landlord_down_played_cards_batch = np.repeat(
landlord_down_played_cards[np.newaxis, :],
num_legal_actions, axis=0)
bomb_num = _get_one_hot_bomb(0)
bomb_num_batch = np.repeat(
bomb_num[np.newaxis, :],
num_legal_actions, axis=0)
x_batch = np.hstack((position_info_batch,
my_handcards_batch,
other_handcards_batch,
three_landlord_cards_batch,
last_action_batch,
landlord_played_cards_batch,
landlord_up_played_cards_batch,
landlord_down_played_cards_batch,
landlord_num_cards_left_batch,
landlord_up_num_cards_left_batch,
landlord_down_num_cards_left_batch,
bomb_num_batch,
bid_info_batch,
multiply_info_batch,
my_action_batch))
x_no_action = np.hstack((position_info,
my_handcards,
other_handcards,
three_landlord_cards,
last_action,
landlord_played_cards,
landlord_up_played_cards,
landlord_down_played_cards,
landlord_num_cards_left,
landlord_up_num_cards_left,
landlord_down_num_cards_left,
bomb_num))
z = _action_seq_list2array(_process_action_seq([], 32))
z_batch = np.repeat(
z[np.newaxis, :, :],
num_legal_actions, axis=0)
obs = {
'position': "",
'x_batch': x_batch.astype(np.float32),
'z_batch': z_batch.astype(np.float32),
'legal_actions': multiply_info_batch,
'x_no_action': x_no_action.astype(np.int8),
'z': z.astype(np.int8),
"bid_info": bid_info.astype(np.int8),
"multiply_info_batch": multiply_info.astype(np.int8)
}
return obs
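# Sketch: the multiply-phase features reuse the 519-column general layout with
# all play-phase fields zeroed; assuming the new-model history encoding yields
# 32 rows of 54 (as implied in _get_obs_general), the shapes are:
#
#     obs = _get_obs_for_multiply('landlord', bid_info, hand_cards, landlord_cards)
#     assert obs['x_batch'].shape == (3, 519)
#     assert obs['z_batch'].shape == (3, 32, 54)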
|
normal
|
{
"blob_id": "4015078ee9640c4558a4f29ebbb89f9098a31014",
"index": 5720,
"step-1": "<mask token>\n\n\nclass Env:\n <mask token>\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n 
flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n <mask token>\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n <mask token>\n <mask token>\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n 
tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {'play': {'landlord': self._get_reward('landlord'),\n 'landlord_up': self._get_reward('landlord_up'),\n 'landlord_down': self._get_reward('landlord_down')}, 'bid':\n {'landlord': self._get_reward_bidding('landlord') * 2,\n 'landlord_up': self._get_reward_bidding('landlord_up'),\n 'landlord_down': self._get_reward_bidding('landlord_down')}}\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n elif self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033\n ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n ] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called in the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n else:\n return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, inforset is defined as all the information\n in the current situation, incuding the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perferfect infomation. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlod_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. 
These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in ['landlord', 'landlord_up',\n 'landlord_down']:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n elif infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\n<mask token>\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\n<mask token>\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\n<mask token>\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\n<mask token>\n\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n 
position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n num_cards_left = np.hstack((landlord_num_cards_left,\n landlord_up_num_cards_left, landlord_down_num_cards_left))\n x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n x_no_action = np.hstack((bid_info, multiply_info))\n z = np.vstack((num_cards_left, my_handcards, other_handcards,\n three_landlord_cards, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))))\n _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:, np.newaxis, :]\n z_batch = np.zeros([len(_z_batch), 40, 54], int)\n for i in range(0, len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n 
tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {'play': {'landlord': self._get_reward('landlord'),\n 'landlord_up': self._get_reward('landlord_up'),\n 'landlord_down': self._get_reward('landlord_down')}, 'bid':\n {'landlord': self._get_reward_bidding('landlord') * 2,\n 'landlord_up': self._get_reward_bidding('landlord_up'),\n 'landlord_down': self._get_reward_bidding('landlord_down')}}\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called in the end of each\n game. 
It returns either 1/-1 for win/loss (plus a small\n        step-count shaping term), or an ADP-style score in which\n        bombs and the multiply choice rescale the reward.\n        """\n        winner = self._game_winner\n        bomb_num = self._game_bomb_num\n        self_bomb_num = self._env.pos_bomb_num[pos]\n        if winner == 'landlord':\n            if self.objective == 'adp':\n                return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n                    + self._env.multiply_count[pos]) / 8\n            elif self.objective == 'logadp':\n                return (1.0 - self._env.step_count * 0.0033\n                    ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n                    pos] / 4\n            else:\n                return 1.0 - self._env.step_count * 0.0033\n        elif self.objective == 'adp':\n            return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n                self._env.multiply_count[pos]) / 8\n        elif self.objective == 'logadp':\n            return (-1.0 + self._env.step_count * 0.0033\n                ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n                ] / 4\n        else:\n            return -1.0 + self._env.step_count * 0.0033\n\n    def _get_reward_bidding(self, pos):\n        """\n        Terminal reward for the bidding decision: the magnitude\n        scales with 2 ** (bid_count - 1), positive if the landlord\n        wins and negative otherwise.\n        """\n        winner = self._game_winner\n        if winner == 'landlord':\n            return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n        else:\n            return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n    @property\n    def _game_infoset(self):\n        """\n        Here, the infoset is defined as all the information\n        in the current situation, including the hand cards\n        of all the players, all the historical moves, etc.\n        That is, it contains perfect information. Later,\n        we will use functions to extract the observable\n        information from the views of the three players.\n        """\n        return self._env.game_infoset\n\n    @property\n    def _game_bomb_num(self):\n        """\n        The number of bombs played so far. This is used as\n        a feature of the neural network and is also used to\n        calculate ADP.\n        """\n        return self._env.get_bomb_num()\n\n    @property\n    def _game_winner(self):\n        """ A string of landlord/peasants\n        """\n        return self._env.get_winner()\n\n    @property\n    def _acting_player_position(self):\n        """\n        The player that is active. It can be landlord,\n        landlord_down, or landlord_up.\n        """\n        return self._env.acting_player_position\n\n    @property\n    def _game_over(self):\n        """ Returns a Boolean\n        """\n        return self._env.game_over\n\n\nclass DummyAgent(object):\n    """\n    Dummy agent is designed to easily interact with the\n    game engine. The agent will first be told what action\n    to perform. Then the environment will call this agent\n    to perform the actual action. This can help us to\n    isolate environment and agents towards a gym-like\n    interface.\n    """\n\n    def __init__(self, position):\n        self.position = position\n        self.action = None\n\n    def act(self, infoset):\n        """\n        Simply return the action that is set previously.\n        """\n        assert self.action in infoset.legal_actions\n        return self.action\n\n    def set_action(self, action):\n        """\n        The environment uses this function to tell\n        the dummy agent what to do.\n        """\n        self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n    """\n    This function obtains observations with imperfect information\n    from the infoset. It has three branches since we encode\n    different features for different positions.\n\n    This function will return a dictionary named `obs`. It contains\n    several fields. 
These fields will be used to train the model.\n    One can play with those features to improve the performance.\n\n    `position` is a string that can be landlord/landlord_down/landlord_up\n\n    `x_batch` is a batch of features (excluding the historical moves).\n    It also encodes the action feature.\n\n    `z_batch` is a batch of features with historical moves only.\n\n    `legal_actions` is the list of legal moves\n\n    `x_no_action`: the features (excluding the historical moves and\n    the action features). It does not have the batch dim.\n\n    `z`: same as z_batch but not a batch.\n    """\n    if use_general:\n        if infoset.player_position not in ['landlord', 'landlord_up',\n            'landlord_down']:\n            raise ValueError('Unexpected position: %s' % infoset.player_position)\n        return _get_obs_general(infoset, infoset.player_position)\n    elif infoset.player_position == 'landlord':\n        return _get_obs_landlord(infoset)\n    elif infoset.player_position == 'landlord_up':\n        return _get_obs_landlord_up(infoset)\n    elif infoset.player_position == 'landlord_down':\n        return _get_obs_landlord_down(infoset)\n    else:\n        raise ValueError('Unexpected position: %s' % infoset.player_position)\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n    """\n    A utility function to obtain one-hot encoding\n    """\n    one_hot = np.zeros(max_num_cards)\n    if num_left_cards > 0:\n        one_hot[num_left_cards - 1] = 1\n    return one_hot\n\n\ndef _cards2array(list_cards):\n    """\n    A utility function that transforms an action, i.e.,\n    a list of card integers, into the card matrix. Here we remove\n    the six entries that are always zero and flatten the\n    representations.\n    """\n    if len(list_cards) == 0:\n        return np.zeros(54, dtype=np.int8)\n    matrix = np.zeros([4, 13], dtype=np.int8)\n    jokers = np.zeros(2, dtype=np.int8)\n    counter = Counter(list_cards)\n    for card, num_times in counter.items():\n        if card < 20:\n            matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n        elif card == 20:\n            jokers[0] = 1\n        elif card == 30:\n            jokers[1] = 1\n    return np.concatenate((matrix.flatten('F'), jokers))\n\n\n<mask token>\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n    """\n    A utility function encoding historical moves. We keep the\n    last `length` moves and, if there are fewer than `length`\n    moves, pad the front with empty moves.\n    """\n    sequence = sequence[-length:].copy()\n    if new_model:\n        sequence = sequence[::-1]\n    if len(sequence) < length:\n        empty_sequence = [[] for _ in range(length - len(sequence))]\n        empty_sequence.extend(sequence)\n        sequence = empty_sequence\n    return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n    """\n    A utility function to encode the number of bombs\n    into one-hot representation.\n    """\n    one_hot = np.zeros(15)\n    one_hot[bomb_num] = 1\n    return one_hot\n\n\ndef _get_obs_landlord(infoset):\n    """\n    Obtain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n 
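    # _cards2array above yields a 54-dim multi-hot vector: the 4x13\n    # rank-by-copies matrix flattened column-major (52 entries) plus two\n    # joker flags; e.g. [3, 3, 20] sets the first two entries of column 0\n    # and the first joker flag.\n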
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n        :], num_legal_actions, axis=0)\n    last_action = _cards2array(infoset.last_move)\n    last_action_batch = np.repeat(last_action[np.newaxis, :],\n        num_legal_actions, axis=0)\n    my_action_batch = np.zeros(my_handcards_batch.shape)\n    for j, action in enumerate(infoset.legal_actions):\n        my_action_batch[j, :] = _cards2array(action)\n    landlord_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord'], 20)\n    landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_up_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord_up'], 17)\n    landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n        [np.newaxis, :], num_legal_actions, axis=0)\n    landlord_down_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord_down'], 17)\n    landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n        [np.newaxis, :], num_legal_actions, axis=0)\n    other_handcards_left_list = []\n    for pos in ['landlord', 'landlord_up', 'landlord_down']:\n        if pos != position:\n            other_handcards_left_list.extend(infoset.all_handcards[pos])\n    landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n    landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n        )\n    landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_down_played_cards = _cards2array(infoset.played_cards[\n        'landlord_down'])\n    landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n        [np.newaxis, :], num_legal_actions, axis=0)\n    bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n    bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n        axis=0)\n    x_batch = np.hstack((position_info_batch, my_handcards_batch,\n        other_handcards_batch, three_landlord_cards_batch,\n        last_action_batch, landlord_played_cards_batch,\n        landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n        landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n        landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n        multiply_info_batch, my_action_batch))\n    x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n        three_landlord_cards, last_action, landlord_played_cards,\n        landlord_up_played_cards, landlord_down_played_cards,\n        landlord_num_cards_left, landlord_up_num_cards_left,\n        landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))\n    z = _action_seq_list2array(_process_action_seq(infoset.\n        card_play_action_seq, 32))\n    z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n    obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n        'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n        legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n        astype(np.int8)}\n    return obs\n\n\ndef _get_obs_general(infoset, position):\n    num_legal_actions = len(infoset.legal_actions)\n    my_handcards = _cards2array(infoset.player_hand_cards)\n    my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n        num_legal_actions, axis=0)\n    other_handcards = _cards2array(infoset.other_hand_cards)\n    other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n        num_legal_actions, axis=0)\n    position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n        'landlord_down': 
[0, 0, 1]}\n    position_info = np.array(position_map[position])\n    position_info_batch = np.repeat(position_info[np.newaxis, :],\n        num_legal_actions, axis=0)\n    bid_info = np.array(infoset.bid_info).flatten()\n    bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n        axis=0)\n    multiply_info = np.array(infoset.multiply_info)\n    multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n        num_legal_actions, axis=0)\n    three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n    three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n        :], num_legal_actions, axis=0)\n    last_action = _cards2array(infoset.last_move)\n    last_action_batch = np.repeat(last_action[np.newaxis, :],\n        num_legal_actions, axis=0)\n    my_action_batch = np.zeros(my_handcards_batch.shape)\n    for j, action in enumerate(infoset.legal_actions):\n        my_action_batch[j, :] = _cards2array(action)\n    landlord_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord'], 20)\n    landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_up_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord_up'], 17)\n    landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n        [np.newaxis, :], num_legal_actions, axis=0)\n    landlord_down_num_cards_left = _get_one_hot_array(infoset.\n        num_cards_left_dict['landlord_down'], 17)\n    landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n        [np.newaxis, :], num_legal_actions, axis=0)\n    other_handcards_left_list = []\n    for pos in ['landlord', 'landlord_up', 'landlord_down']:\n        if pos != position:\n            other_handcards_left_list.extend(infoset.all_handcards[pos])\n    landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n    landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n        )\n    landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n        newaxis, :], num_legal_actions, axis=0)\n    landlord_down_played_cards = _cards2array(infoset.played_cards[\n        'landlord_down'])\n    landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n        [np.newaxis, :], num_legal_actions, axis=0)\n    bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n    bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n        axis=0)\n    num_cards_left = np.hstack((landlord_num_cards_left,\n        landlord_up_num_cards_left, landlord_down_num_cards_left))\n    x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n    x_no_action = np.hstack((bid_info, multiply_info))\n    z = np.vstack((num_cards_left, my_handcards, other_handcards,\n        three_landlord_cards, landlord_played_cards,\n        landlord_up_played_cards, landlord_down_played_cards,\n        _action_seq_list2array(_process_action_seq(infoset.\n        card_play_action_seq, 32))))\n    _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n    my_action_batch = my_action_batch[:, np.newaxis, :]\n    z_batch = np.zeros([len(_z_batch), 40, 54], int)\n    for i in range(0, len(_z_batch)):\n        z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n    obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n        'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n        legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n        astype(np.int8)}\n    return obs\n\n\ndef gen_bid_legal_actions(player_id, bid_info):\n    self_bid_info = bid_info[:, [(player_id - 1) % 3, 
player_id, (player_id +\n 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':\n multiply_info.astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n x_batch = np.hstack((my_handcards_batch, bid_info_batch))\n x_no_action = np.hstack(my_handcards)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':\n x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n 
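    # At the multiply decision no cards have been played yet, so the\n    # played-cards and cards-left features below are all-zero placeholders;\n    # only multiply_info_batch differs across the three candidate actions.\n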
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':\n multiply_info.astype(np.int8)}\n return obs\n",
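A minimal rollout sketch for the environment defined above (an illustration, not part of the original record; it assumes only the `Env` class and `get_obs` shown here, and uses the `model=None` reset path so no bidding network is required):

    import random

    def rollout_once(objective='adp'):
        # Play one full game with uniformly random legal actions.
        env = Env(objective)
        obs = env.reset(model=None, device=None)
        done = False
        reward = 0.0
        while not done:
            action = random.choice(obs['legal_actions'])
            obs, reward, done, _ = env.step(action)
        # On termination `reward` holds the per-seat play/bid rewards.
        return reward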
"step-4": "from collections import Counter\nimport numpy as np\nimport random\nimport torch\nimport BidModel\nfrom douzero.env.game import GameEnv\nenv_version = '3.2'\nenv_url = 'http://od.vcccz.com/hechuan/env.py'\nCard2Column = {(3): 0, (4): 1, (5): 2, (6): 3, (7): 4, (8): 5, (9): 6, (10):\n 7, (11): 8, (12): 9, (13): 10, (14): 11, (17): 12}\nNumOnes2Array = {(0): np.array([0, 0, 0, 0]), (1): np.array([1, 0, 0, 0]),\n (2): np.array([1, 1, 0, 0]), (3): np.array([1, 1, 1, 0]), (4): np.array\n ([1, 1, 1, 1])}\ndeck = []\nfor i in range(3, 15):\n deck.extend([i for _ in range(4)])\ndeck.extend([(17) for _ in range(4)])\ndeck.extend([20, 30])\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20], 'landlord_up': _deck[\n 20:37], 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20]}\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [_deck[:17], _deck[17:34], _deck[34:51]]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1], [-1, -1, -1], [-1, -1, -\n 1], [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[\n bidding_player])\n if wr >= 0.7:\n action = {'action': 1}\n bid_limit += 1\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = 
bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info,\n card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward('bidding', torch.tensor(\n bidding_obs['z_batch'], device=device), torch.\n tensor(bidding_obs['x_batch'], device=device),\n flags=flags)\n bid_obs_buffer.append({'x_batch': bidding_obs['x_batch'\n ][action['action']], 'z_batch': bidding_obs[\n 'z_batch'][action['action']], 'pid': bidding_player})\n if action['action'] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards}\n card_play_data['landlord'].sort()\n player_ids = {'landlord': last_bid, 'landlord_up': (last_bid - \n 1) % 3, 'landlord_down': (last_bid + 1) % 3}\n player_positions = {last_bid: 'landlord', ((last_bid - 1) % 3):\n 'landlord_up', ((last_bid + 1) % 3): 'landlord_down'}\n for bid_obs in bid_obs_buffer:\n bid_obs.update({'position': player_positions[bid_obs['pid']]})\n self._env.card_play_init(card_play_data)\n multiply_map = [np.array([1, 0, 0]), np.array([0, 1, 0]), np.\n array([0, 0, 1])]\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) %\n 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n action = {'action': 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action\n ['action']]\n self._env.multiply_count[pos] = action['action']\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print('发牌情况: %i/%i %.1f%%' % (self.force_bid, self.\n total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {'bid_obs_buffer': bid_obs_buffer,\n 'multiply_obs_buffer': multiply_obs_buffer}\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. 
It also returns an empty\n        dictionary that is reserved to pass useful information.\n        """\n        assert action in self.infoset.legal_actions\n        self.players[self._acting_player_position].set_action(action)\n        self._env.step()\n        self.infoset = self._game_infoset\n        done = False\n        reward = 0.0\n        if self._game_over:\n            done = True\n            reward = {'play': {'landlord': self._get_reward('landlord'),\n                'landlord_up': self._get_reward('landlord_up'),\n                'landlord_down': self._get_reward('landlord_down')}, 'bid':\n                {'landlord': self._get_reward_bidding('landlord') * 2,\n                'landlord_up': self._get_reward_bidding('landlord_up'),\n                'landlord_down': self._get_reward_bidding('landlord_down')}}\n            obs = None\n        else:\n            obs = get_obs(self.infoset)\n        return obs, reward, done, {}\n\n    def _get_reward(self, pos):\n        """\n        This function is called at the end of each\n        game. It returns either 1/-1 for win/loss (plus a small\n        step-count shaping term), or an ADP-style score in which\n        bombs and the multiply choice rescale the reward.\n        """\n        winner = self._game_winner\n        bomb_num = self._game_bomb_num\n        self_bomb_num = self._env.pos_bomb_num[pos]\n        if winner == 'landlord':\n            if self.objective == 'adp':\n                return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num\n                    + self._env.multiply_count[pos]) / 8\n            elif self.objective == 'logadp':\n                return (1.0 - self._env.step_count * 0.0033\n                    ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[\n                    pos] / 4\n            else:\n                return 1.0 - self._env.step_count * 0.0033\n        elif self.objective == 'adp':\n            return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num +\n                self._env.multiply_count[pos]) / 8\n        elif self.objective == 'logadp':\n            return (-1.0 + self._env.step_count * 0.0033\n                ) * 1.3 ** self_bomb_num * 2 ** self._env.multiply_count[pos\n                ] / 4\n        else:\n            return -1.0 + self._env.step_count * 0.0033\n\n    def _get_reward_bidding(self, pos):\n        """\n        Terminal reward for the bidding decision: the magnitude\n        scales with 2 ** (bid_count - 1), positive if the landlord\n        wins and negative otherwise.\n        """\n        winner = self._game_winner\n        if winner == 'landlord':\n            return 1.0 * 2 ** (self._env.bid_count - 1) / 8\n        else:\n            return -1.0 * 2 ** (self._env.bid_count - 1) / 8\n\n    @property\n    def _game_infoset(self):\n        """\n        Here, the infoset is defined as all the information\n        in the current situation, including the hand cards\n        of all the players, all the historical moves, etc.\n        That is, it contains perfect information. Later,\n        we will use functions to extract the observable\n        information from the views of the three players.\n        """\n        return self._env.game_infoset\n\n    @property\n    def _game_bomb_num(self):\n        """\n        The number of bombs played so far. This is used as\n        a feature of the neural network and is also used to\n        calculate ADP.\n        """\n        return self._env.get_bomb_num()\n\n    @property\n    def _game_winner(self):\n        """ A string of landlord/peasants\n        """\n        return self._env.get_winner()\n\n    @property\n    def _acting_player_position(self):\n        """\n        The player that is active. It can be landlord,\n        landlord_down, or landlord_up.\n        """\n        return self._env.acting_player_position\n\n    @property\n    def _game_over(self):\n        """ Returns a Boolean\n        """\n        return self._env.game_over\n\n\nclass DummyAgent(object):\n    """\n    Dummy agent is designed to easily interact with the\n    game engine. The agent will first be told what action\n    to perform. Then the environment will call this agent\n    to perform the actual action. 
This can help us to\n isolate environment and agents towards a gym like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that is set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return dictionary named `obs`. It contains\n several fields. These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the hisorical moves).\n It also encodes the action feature\n\n `z_batch` is a batch of features with hisorical moves only.\n\n `legal_actions` is the legal moves\n\n `x_no_action`: the features (exluding the hitorical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in ['landlord', 'landlord_up',\n 'landlord_down']:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n elif infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n \"\"\"\n A utility function to obtain one-hot endoding\n \"\"\"\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n return one_hot\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n A list of integers into card matrix. Here we remove\n the six entries that are always zero and flatten the\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\ndef _action_seq_list2array(action_seq_list, new_model=True):\n \"\"\"\n A utility function to encode the historical moves.\n We encode the historical 15 actions. If there is\n no 15 actions, we pad the features with 0. 
Since\n three moves is a round in DouDizhu, we concatenate\n the representations for each consecutive three moves.\n Finally, we obtain a 5x162 matrix, which will be fed\n into LSTM for encoding.\n \"\"\"\n if new_model:\n position_map = {'landlord': 0, 'landlord_up': 1, 'landlord_down': 2}\n action_seq_array = np.ones((len(action_seq_list), 54)) * -1\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :54] = _cards2array(list_cards[1])\n else:\n action_seq_array = np.zeros((len(action_seq_list), 54))\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :] = _cards2array(list_cards[1])\n action_seq_array = action_seq_array.reshape(5, 162)\n return action_seq_array\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\ndef _get_obs_landlord(infoset):\n \"\"\"\n Obttain the landlord features. See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_down']\n )\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_up', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n 
astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n last_landlord_action = _cards2array(infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(last_landlord_action[np.newaxis,\n :], num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n last_teammate_action = _cards2array(infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(last_teammate_action[np.newaxis,\n :], num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(teammate_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n teammate_played_cards = _cards2array(infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(teammate_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n landlord_played_cards_batch, teammate_played_cards_batch,\n last_action_batch, last_landlord_action_batch,\n last_teammate_action_batch, landlord_num_cards_left_batch,\n teammate_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards,\n landlord_played_cards, teammate_played_cards, last_action,\n last_landlord_action, last_teammate_action, landlord_num_cards_left,\n teammate_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord_down', 'x_batch': x_batch.astype(np.\n float32), 'z_batch': z_batch.astype(np.float32), 'legal_actions':\n infoset.legal_actions, 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8)}\n return obs\n\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((my_handcards_batch, other_handcards_batch,\n last_action_batch, landlord_up_played_cards_batch,\n landlord_down_played_cards_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, my_action_batch))\n x_no_action = np.hstack((my_handcards, other_handcards, last_action,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_up_num_cards_left, landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 15, False), False)\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': 'landlord', 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n 
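# Editor's note (added comment, not in the original source): _cards2array\n # yields a 54-dim multi-hot vector: 13 ranks x 4 copies, flattened\n # column-major (52 entries), plus two joker flags. For example,\n # _cards2array([3, 3, 20]) sets indices 0 and 1 (two copies of rank 3)\n # and index 52 (the black joker).\n 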
three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num, bid_info, multiply_info))\n z = _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': 
[0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n landlord_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(infoset.\n num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n other_handcards_left_list = []\n for pos in ['landlord', 'landlord_up', 'landlord_up']:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n landlord_played_cards = _cards2array(infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array(infoset.played_cards['landlord_up']\n )\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array(infoset.played_cards[\n 'landlord_down'])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(infoset.bomb_num)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n num_cards_left = np.hstack((landlord_num_cards_left,\n landlord_up_num_cards_left, landlord_down_num_cards_left))\n x_batch = np.hstack((bid_info_batch, multiply_info_batch))\n x_no_action = np.hstack((bid_info, multiply_info))\n z = np.vstack((num_cards_left, my_handcards, other_handcards,\n three_landlord_cards, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n _action_seq_list2array(_process_action_seq(infoset.\n card_play_action_seq, 32))))\n _z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:, np.newaxis, :]\n z_batch = np.zeros([len(_z_batch), 40, 54], int)\n for i in range(0, len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i], _z_batch[i]))\n obs = {'position': position, 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32), 'legal_actions': infoset.\n legal_actions, 'x_no_action': x_no_action.astype(np.int8), 'z': z.\n astype(np.int8)}\n return obs\n\n\ndef gen_bid_legal_actions(player_id, bid_info):\n self_bid_info = bid_info[:, [(player_id - 1) % 3, 
player_id, (player_id +\n 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n 
landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info_batch': bid_info_batch.astype(np.int8), 'multiply_info':\n multiply_info.astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n x_batch = np.hstack((my_handcards_batch, bid_info_batch))\n x_no_action = np.hstack(my_handcards)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n np.array([0, 0]), 'legal_actions': bid_legal_actions, 'x_no_action':\n x_no_action.astype(np.int8), 'bid_info_batch': bid_info_batch.\n astype(np.int8)}\n return obs\n\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12,\n 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n position_map = {'landlord': [1, 0, 0], 'landlord_up': [0, 1, 0],\n 'landlord_down': [0, 0, 1]}\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :], num_legal_actions,\n axis=0)\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis,\n :], num_legal_actions, axis=0)\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(landlord_num_cards_left[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(landlord_up_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n 
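# Editor's note (added comment, not in the original source): at this point\n # no cards have been played, and the cards-left counts are passed as 0 here;\n # _get_one_hot_array(0, n) returns an all-zero n-dim vector because the\n # helper only sets a bit for counts > 0, so these features act as zero\n # placeholders that keep the layout identical to play time.\n 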
landlord_down_num_cards_left_batch = np.repeat(landlord_down_num_cards_left\n [np.newaxis, :], num_legal_actions, axis=0)\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(landlord_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(landlord_up_played_cards[np.\n newaxis, :], num_legal_actions, axis=0)\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(landlord_down_played_cards\n [np.newaxis, :], num_legal_actions, axis=0)\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(bomb_num[np.newaxis, :], num_legal_actions,\n axis=0)\n x_batch = np.hstack((position_info_batch, my_handcards_batch,\n other_handcards_batch, three_landlord_cards_batch,\n last_action_batch, landlord_played_cards_batch,\n landlord_up_played_cards_batch, landlord_down_played_cards_batch,\n landlord_num_cards_left_batch, landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch, bomb_num_batch, bid_info_batch,\n multiply_info_batch, my_action_batch))\n x_no_action = np.hstack((position_info, my_handcards, other_handcards,\n three_landlord_cards, last_action, landlord_played_cards,\n landlord_up_played_cards, landlord_down_played_cards,\n landlord_num_cards_left, landlord_up_num_cards_left,\n landlord_down_num_cards_left, bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(z[np.newaxis, :, :], num_legal_actions, axis=0)\n obs = {'position': '', 'x_batch': x_batch.astype(np.float32), 'z_batch':\n z_batch.astype(np.float32), 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8), 'z': z.astype(np.int8),\n 'bid_info': bid_info.astype(np.int8), 'multiply_info_batch':\n multiply_info.astype(np.int8)}\n return obs\n",
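Editor's note: the sketch below is an added illustration, not part of the original record. It shows what gen_bid_legal_actions (defined above) returns at the very start of bidding, assuming numpy is imported as np as in the source file.

    bid_info = np.full((4, 3), -1)  # no bidding round decided yet
    actions = gen_bid_legal_actions(0, bid_info)
    # actions[0] ("pass"): [0, 0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    # actions[1] ("bid"):  [0, 1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1]
    # Columns are reordered to (previous, self, next), so the middle entry
    # marks this player's choice in the current round.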
"step-5": "from collections import Counter\nimport numpy as np\nimport random\nimport torch\nimport BidModel\n\nfrom douzero.env.game import GameEnv\n\nenv_version = \"3.2\"\nenv_url = \"http://od.vcccz.com/hechuan/env.py\"\nCard2Column = {3: 0, 4: 1, 5: 2, 6: 3, 7: 4, 8: 5, 9: 6, 10: 7,\n 11: 8, 12: 9, 13: 10, 14: 11, 17: 12}\n\nNumOnes2Array = {0: np.array([0, 0, 0, 0]),\n 1: np.array([1, 0, 0, 0]),\n 2: np.array([1, 1, 0, 0]),\n 3: np.array([1, 1, 1, 0]),\n 4: np.array([1, 1, 1, 1])}\n\ndeck = []\nfor i in range(3, 15):\n deck.extend([i for _ in range(4)])\ndeck.extend([17 for _ in range(4)])\ndeck.extend([20, 30])\n\n\nclass Env:\n \"\"\"\n Doudizhu multi-agent wrapper\n \"\"\"\n\n def __init__(self, objective):\n \"\"\"\n Objective is wp/adp/logadp. It indicates whether considers\n bomb in reward calculation. Here, we use dummy agents.\n This is because, in the orignial game, the players\n are `in` the game. Here, we want to isolate\n players and environments to have a more gym style\n interface. To achieve this, we use dummy players\n to play. For each move, we tell the corresponding\n dummy player which action to play, then the player\n will perform the actual action in the game engine.\n \"\"\"\n self.objective = objective\n\n # Initialize players\n # We use three dummy player for the target position\n self.players = {}\n for position in ['landlord', 'landlord_up', 'landlord_down']:\n self.players[position] = DummyAgent(position)\n\n # Initialize the internal environment\n self._env = GameEnv(self.players)\n self.total_round = 0\n self.force_bid = 0\n self.infoset = None\n\n def reset(self, model, device, flags=None):\n \"\"\"\n Every time reset is called, the environment\n will be re-initialized with a new deck of cards.\n This function is usually called when a game is over.\n \"\"\"\n self._env.reset()\n\n # Randomly shuffle the deck\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20],\n 'landlord_up': _deck[20:37],\n 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20],\n }\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = []\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [\n _deck[:17],\n _deck[17:34],\n _deck[34:51],\n ]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n # bidding_player = 0 # debug\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[bidding_player])\n if wr >= 0.7:\n action = {\"action\": 1} # debug\n bid_limit += 1\n\n bid_obs_buffer.append({\n 
\"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n bid_obs_buffer.append({\n \"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards,\n }\n card_play_data[\"landlord\"].sort()\n player_ids = {\n 'landlord': last_bid,\n 'landlord_up': (last_bid - 1) % 3,\n 'landlord_down': (last_bid + 1) % 3,\n }\n player_positions = {\n last_bid: 'landlord',\n (last_bid - 1) % 3: 'landlord_up',\n (last_bid + 1) % 3: 'landlord_down'\n }\n for bid_obs in bid_obs_buffer:\n bid_obs.update({\"position\": player_positions[bid_obs[\"pid\"]]})\n\n # Initialize the cards\n self._env.card_play_init(card_play_data)\n multiply_map = [\n np.array([1, 0, 0]),\n np.array([0, 1, 0]),\n np.array([0, 0, 1])\n ]\n for pos in [\"landlord\", \"landlord_up\", \"landlord_down\"]:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n # multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],\n # landlord_cards)\n # action = model.forward(pos, torch.tensor(multiply_obs[\"z_batch\"], device=device),\n # torch.tensor(multiply_obs[\"x_batch\"], device=device), flags=flags)\n # multiply_obs_buffer.append({\n # \"x_batch\": multiply_obs[\"x_batch\"][action[\"action\"]],\n # \"z_batch\": multiply_obs[\"z_batch\"][action[\"action\"]],\n # \"position\": pos\n # })\n action = {\"action\": 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action[\"action\"]]\n self._env.multiply_count[pos] = action[\"action\"]\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print(\"发牌情况: %i/%i %.1f%%\" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {\n \"bid_obs_buffer\": bid_obs_buffer,\n \"multiply_obs_buffer\": multiply_obs_buffer\n }\n\n def step(self, action):\n \"\"\"\n Step function takes as input the action, which\n is a list of integers, and output the next obervation,\n reward, and a Boolean variable indicating whether the\n current game is finished. 
It also returns an empty\n dictionary that is reserved to pass useful information.\n \"\"\"\n assert action in self.infoset.legal_actions\n self.players[self._acting_player_position].set_action(action)\n self._env.step()\n self.infoset = self._game_infoset\n done = False\n reward = 0.0\n if self._game_over:\n done = True\n reward = {\n \"play\": {\n \"landlord\": self._get_reward(\"landlord\"),\n \"landlord_up\": self._get_reward(\"landlord_up\"),\n \"landlord_down\": self._get_reward(\"landlord_down\")\n },\n \"bid\": {\n \"landlord\": self._get_reward_bidding(\"landlord\")*2,\n \"landlord_up\": self._get_reward_bidding(\"landlord_up\"),\n \"landlord_down\": self._get_reward_bidding(\"landlord_down\")\n }\n }\n obs = None\n else:\n obs = get_obs(self.infoset)\n return obs, reward, done, {}\n\n def _get_reward(self, pos):\n \"\"\"\n This function is called at the end of each\n game. It returns either 1/-1 for win/loss,\n or ADP, i.e., every bomb will double the score.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n self_bomb_num = self._env.pos_bomb_num[pos]\n if winner == 'landlord':\n if self.objective == 'adp':\n return (1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (1.0 - self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4\n else:\n return 1.0 - self._env.step_count * 0.0033\n else:\n if self.objective == 'adp':\n return (-1.1 - self._env.step_count * 0.0033) * 1.3 ** (bomb_num + self._env.multiply_count[pos]) / 8\n elif self.objective == 'logadp':\n return (-1.0 + self._env.step_count * 0.0033) * 1.3**self_bomb_num * 2**self._env.multiply_count[pos] / 4\n else:\n return -1.0 + self._env.step_count * 0.0033\n\n def _get_reward_bidding(self, pos):\n \"\"\"\n This function is called at the end of each\n game. It returns the bidding reward, which\n scales with the number of bids made.\n \"\"\"\n winner = self._game_winner\n bomb_num = self._game_bomb_num\n if winner == 'landlord':\n return 1.0 * 2**(self._env.bid_count-1) / 8\n else:\n return -1.0 * 2**(self._env.bid_count-1) / 8\n\n @property\n def _game_infoset(self):\n \"\"\"\n Here, infoset is defined as all the information\n in the current situation, including the hand cards\n of all the players, all the historical moves, etc.\n That is, it contains perfect information. Later,\n we will use functions to extract the observable\n information from the views of the three players.\n \"\"\"\n return self._env.game_infoset\n\n @property\n def _game_bomb_num(self):\n \"\"\"\n The number of bombs played so far. This is used as\n a feature of the neural network and is also used to\n calculate ADP.\n \"\"\"\n return self._env.get_bomb_num()\n\n @property\n def _game_winner(self):\n \"\"\" A string of landlord/peasants\n \"\"\"\n return self._env.get_winner()\n\n @property\n def _acting_player_position(self):\n \"\"\"\n The player that is active. It can be landlord,\n landlord_down, or landlord_up.\n \"\"\"\n return self._env.acting_player_position\n\n @property\n def _game_over(self):\n \"\"\" Returns a Boolean\n \"\"\"\n return self._env.game_over\n\n\nclass DummyAgent(object):\n \"\"\"\n Dummy agent is designed to easily interact with the\n game engine. The agent will first be told what action\n to perform. Then the environment will call this agent\n to perform the actual action. 
This can help us to\n isolate environment and agents towards a gym-like\n interface.\n \"\"\"\n\n def __init__(self, position):\n self.position = position\n self.action = None\n\n def act(self, infoset):\n \"\"\"\n Simply return the action that was set previously.\n \"\"\"\n assert self.action in infoset.legal_actions\n return self.action\n\n def set_action(self, action):\n \"\"\"\n The environment uses this function to tell\n the dummy agent what to do.\n \"\"\"\n self.action = action\n\n\ndef get_obs(infoset, use_general=True):\n \"\"\"\n This function obtains observations with imperfect information\n from the infoset. It has three branches since we encode\n different features for different positions.\n\n This function will return a dictionary named `obs`. It contains\n several fields. These fields will be used to train the model.\n One can play with those features to improve the performance.\n\n `position` is a string that can be landlord/landlord_down/landlord_up\n\n `x_batch` is a batch of features (excluding the historical moves).\n It also encodes the action feature.\n\n `z_batch` is a batch of features with historical moves only.\n\n `legal_actions` is the list of legal moves\n\n `x_no_action`: the features (excluding the historical moves and\n the action features). It does not have the batch dim.\n\n `z`: same as z_batch but not a batch.\n \"\"\"\n if use_general:\n if infoset.player_position not in [\"landlord\", \"landlord_up\", \"landlord_down\"]:\n raise ValueError('')\n return _get_obs_general(infoset, infoset.player_position)\n else:\n if infoset.player_position == 'landlord':\n return _get_obs_landlord(infoset)\n elif infoset.player_position == 'landlord_up':\n return _get_obs_landlord_up(infoset)\n elif infoset.player_position == 'landlord_down':\n return _get_obs_landlord_down(infoset)\n else:\n raise ValueError('')\n\n\ndef _get_one_hot_array(num_left_cards, max_num_cards):\n \"\"\"\n A utility function to obtain one-hot encoding\n \"\"\"\n one_hot = np.zeros(max_num_cards)\n if num_left_cards > 0:\n one_hot[num_left_cards - 1] = 1\n\n return one_hot\n\n\ndef _cards2array(list_cards):\n \"\"\"\n A utility function that transforms the actions, i.e.,\n a list of integers, into a card matrix. Here we remove\n the six entries that are always zero and flatten\n the representations.\n \"\"\"\n if len(list_cards) == 0:\n return np.zeros(54, dtype=np.int8)\n\n matrix = np.zeros([4, 13], dtype=np.int8)\n jokers = np.zeros(2, dtype=np.int8)\n counter = Counter(list_cards)\n for card, num_times in counter.items():\n if card < 20:\n matrix[:, Card2Column[card]] = NumOnes2Array[num_times]\n elif card == 20:\n jokers[0] = 1\n elif card == 30:\n jokers[1] = 1\n return np.concatenate((matrix.flatten('F'), jokers))\n\n\n# def _action_seq_list2array(action_seq_list):\n# \"\"\"\n# A utility function to encode the historical moves.\n# We encode the historical 15 actions. If there are\n# fewer than 15 actions, we pad the features with 0. 
Since\n# three moves is a round in DouDizhu, we concatenate\n# the representations for each consecutive three moves.\n# Finally, we obtain a 5x162 matrix, which will be fed\n# into LSTM for encoding.\n# \"\"\"\n# action_seq_array = np.zeros((len(action_seq_list), 54))\n# for row, list_cards in enumerate(action_seq_list):\n# action_seq_array[row, :] = _cards2array(list_cards)\n# # action_seq_array = action_seq_array.reshape(5, 162)\n# return action_seq_array\n\ndef _action_seq_list2array(action_seq_list, new_model=True):\n \"\"\"\n A utility function to encode the historical moves.\n We encode the historical 15 actions. If there is\n no 15 actions, we pad the features with 0. Since\n three moves is a round in DouDizhu, we concatenate\n the representations for each consecutive three moves.\n Finally, we obtain a 5x162 matrix, which will be fed\n into LSTM for encoding.\n \"\"\"\n\n if new_model:\n position_map = {\"landlord\": 0, \"landlord_up\": 1, \"landlord_down\": 2}\n action_seq_array = np.ones((len(action_seq_list), 54)) * -1 # Default Value -1 for not using area\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :54] = _cards2array(list_cards[1])\n else:\n action_seq_array = np.zeros((len(action_seq_list), 54))\n for row, list_cards in enumerate(action_seq_list):\n if list_cards != []:\n action_seq_array[row, :] = _cards2array(list_cards[1])\n action_seq_array = action_seq_array.reshape(5, 162)\n return action_seq_array\n\n # action_seq_array = np.zeros((len(action_seq_list), 54))\n # for row, list_cards in enumerate(action_seq_list):\n # if list_cards != []:\n # action_seq_array[row, :] = _cards2array(list_cards[1])\n # return action_seq_array\n\n\ndef _process_action_seq(sequence, length=15, new_model=True):\n \"\"\"\n A utility function encoding historical moves. We\n encode 15 moves. If there is no 15 moves, we pad\n with zeros.\n \"\"\"\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence\n\n\ndef _get_one_hot_bomb(bomb_num):\n \"\"\"\n A utility function to encode the number of bombs\n into one-hot representation.\n \"\"\"\n one_hot = np.zeros(15)\n one_hot[bomb_num] = 1\n return one_hot\n\n\ndef _get_obs_landlord(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n last_action_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n last_action,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_up(infoset):\n \"\"\"\n Obttain the landlord_up features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n last_landlord_action = _cards2array(\n infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(\n last_landlord_action[np.newaxis, :],\n num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_teammate_action = _cards2array(\n infoset.last_move_dict['landlord_down'])\n last_teammate_action_batch = np.repeat(\n last_teammate_action[np.newaxis, :],\n num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n teammate_num_cards_left_batch = np.repeat(\n teammate_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n teammate_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n teammate_played_cards_batch = np.repeat(\n teammate_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n landlord_played_cards_batch,\n teammate_played_cards_batch,\n last_action_batch,\n last_landlord_action_batch,\n last_teammate_action_batch,\n landlord_num_cards_left_batch,\n teammate_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n landlord_played_cards,\n teammate_played_cards,\n last_action,\n last_landlord_action,\n last_teammate_action,\n landlord_num_cards_left,\n teammate_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord_up',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_down(infoset):\n \"\"\"\n Obttain the landlord_down features. 
See Table 5 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n last_landlord_action = _cards2array(\n infoset.last_move_dict['landlord'])\n last_landlord_action_batch = np.repeat(\n last_landlord_action[np.newaxis, :],\n num_legal_actions, axis=0)\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_teammate_action = _cards2array(\n infoset.last_move_dict['landlord_up'])\n last_teammate_action_batch = np.repeat(\n last_teammate_action[np.newaxis, :],\n num_legal_actions, axis=0)\n teammate_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n teammate_num_cards_left_batch = np.repeat(\n teammate_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n teammate_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n teammate_played_cards_batch = np.repeat(\n teammate_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n landlord_played_cards_batch,\n teammate_played_cards_batch,\n last_action_batch,\n last_landlord_action_batch,\n last_teammate_action_batch,\n landlord_num_cards_left_batch,\n teammate_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n landlord_played_cards,\n teammate_played_cards,\n last_action,\n last_landlord_action,\n last_teammate_action,\n landlord_num_cards_left,\n teammate_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord_down',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_landlord_withbid(infoset):\n \"\"\"\n Obttain the landlord features. 
See Table 4 in\n https://arxiv.org/pdf/2106.06135.pdf\n \"\"\"\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((my_handcards_batch,\n other_handcards_batch,\n last_action_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n my_action_batch))\n x_no_action = np.hstack((my_handcards,\n other_handcards,\n last_action,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 15, False), False)\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': 'landlord',\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\n\ndef _get_obs_general1(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n 
num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards_left_list = []\n for pos in [\"landlord\", \"landlord_up\", \"landlord_up\"]:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch, # 3\n my_handcards_batch, # 54\n other_handcards_batch, # 54\n three_landlord_cards_batch, # 54\n last_action_batch, # 54\n landlord_played_cards_batch, # 54\n landlord_up_played_cards_batch, # 54\n landlord_down_played_cards_batch, # 54\n landlord_num_cards_left_batch, # 20\n landlord_up_num_cards_left_batch, # 17\n landlord_down_num_cards_left_batch, # 17\n bomb_num_batch, # 15\n bid_info_batch, # 12\n multiply_info_batch, # 3\n my_action_batch)) # 54\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num,\n bid_info,\n multiply_info))\n z = _action_seq_list2array(_process_action_seq(\n infoset.card_play_action_seq, 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': position,\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef _get_obs_general(infoset, position):\n num_legal_actions = len(infoset.legal_actions)\n my_handcards = _cards2array(infoset.player_hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n 
num_legal_actions, axis=0)\n\n other_handcards = _cards2array(infoset.other_hand_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(infoset.bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array(infoset.multiply_info)\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array(infoset.three_landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array(infoset.last_move)\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j, action in enumerate(infoset.legal_actions):\n my_action_batch[j, :] = _cards2array(action)\n\n landlord_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord'], 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_up'], 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(\n infoset.num_cards_left_dict['landlord_down'], 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n other_handcards_left_list = []\n for pos in [\"landlord\", \"landlord_up\", \"landlord_up\"]:\n if pos != position:\n other_handcards_left_list.extend(infoset.all_handcards[pos])\n\n landlord_played_cards = _cards2array(\n infoset.played_cards['landlord'])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array(\n infoset.played_cards['landlord_up'])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array(\n infoset.played_cards['landlord_down'])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(\n infoset.bomb_num)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n num_cards_left = np.hstack((\n landlord_num_cards_left, # 20\n landlord_up_num_cards_left, # 17\n landlord_down_num_cards_left))\n\n x_batch = np.hstack((\n bid_info_batch, # 12\n multiply_info_batch)) # 3\n x_no_action = np.hstack((\n bid_info,\n multiply_info))\n z =np.vstack((\n num_cards_left,\n my_handcards, # 54\n other_handcards, # 54\n three_landlord_cards, # 54\n landlord_played_cards, # 54\n landlord_up_played_cards, # 54\n landlord_down_played_cards, # 54\n _action_seq_list2array(_process_action_seq(infoset.card_play_action_seq, 32))\n ))\n\n _z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n my_action_batch = my_action_batch[:,np.newaxis,:]\n z_batch = np.zeros([len(_z_batch),40,54],int)\n for i in 
range(0,len(_z_batch)):\n z_batch[i] = np.vstack((my_action_batch[i],_z_batch[i]))\n obs = {\n 'position': position,\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': infoset.legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n }\n return obs\n\ndef gen_bid_legal_actions(player_id, bid_info):\n self_bid_info = bid_info[:, [(player_id - 1) % 3, player_id, (player_id + 1) % 3]]\n curr_round = -1\n for r in range(4):\n if -1 in self_bid_info[r]:\n curr_round = r\n break\n bid_actions = []\n if curr_round != -1:\n self_bid_info[curr_round] = [0, 0, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n self_bid_info[curr_round] = [0, 1, 0]\n bid_actions.append(np.array(self_bid_info).flatten())\n return np.array(bid_actions)\n\n\ndef _get_obs_for_bid_legacy(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_info = np.array([0, 0, 0])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = bid_legal_actions\n\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.repeat(multiply_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n three_landlord_cards = _cards2array([])\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(2):\n my_action_batch[j, :] = _cards2array([])\n\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch,\n my_handcards_batch,\n other_handcards_batch,\n 
three_landlord_cards_batch,\n last_action_batch,\n landlord_played_cards_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_num_cards_left_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n bid_info_batch,\n multiply_info_batch,\n my_action_batch))\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n \"bid_info_batch\": bid_info_batch.astype(np.int8),\n \"multiply_info\": multiply_info.astype(np.int8)\n }\n return obs\n\ndef _get_obs_for_bid(player_id, bid_info, hand_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 2\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_legal_actions = gen_bid_legal_actions(player_id, bid_info)\n bid_info = bid_legal_actions[0]\n bid_info_batch = np.hstack([bid_legal_actions for _ in range(5)])\n\n x_batch = np.hstack((my_handcards_batch,\n bid_info_batch))\n x_no_action = np.hstack((my_handcards))\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': np.array([0,0]),\n 'legal_actions': bid_legal_actions,\n 'x_no_action': x_no_action.astype(np.int8),\n \"bid_info_batch\": bid_info_batch.astype(np.int8)\n }\n return obs\n\ndef _get_obs_for_multiply(position, bid_info, hand_cards, landlord_cards):\n all_cards = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,\n 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12,\n 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30]\n num_legal_actions = 3\n my_handcards = _cards2array(hand_cards)\n my_handcards_batch = np.repeat(my_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n other_cards = []\n other_cards.extend(all_cards)\n for card in hand_cards:\n other_cards.remove(card)\n other_handcards = _cards2array(other_cards)\n other_handcards_batch = np.repeat(other_handcards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n position_map = {\n \"landlord\": [1, 0, 0],\n \"landlord_up\": [0, 1, 0],\n \"landlord_down\": [0, 0, 1]\n }\n position_info = np.array(position_map[position])\n position_info_batch = np.repeat(position_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bid_info = np.array(bid_info).flatten()\n bid_info_batch = np.repeat(bid_info[np.newaxis, :],\n num_legal_actions, axis=0)\n\n multiply_info = np.array([0, 0, 0])\n multiply_info_batch = np.array([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1]])\n\n three_landlord_cards = _cards2array(landlord_cards)\n three_landlord_cards_batch = np.repeat(three_landlord_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n last_action = _cards2array([])\n last_action_batch = np.repeat(last_action[np.newaxis, :],\n num_legal_actions, axis=0)\n\n 
my_action_batch = np.zeros(my_handcards_batch.shape)\n for j in range(num_legal_actions):\n my_action_batch[j, :] = _cards2array([])\n\n landlord_num_cards_left = _get_one_hot_array(0, 20)\n landlord_num_cards_left_batch = np.repeat(\n landlord_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_num_cards_left = _get_one_hot_array(0, 17)\n landlord_up_num_cards_left_batch = np.repeat(\n landlord_up_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_num_cards_left = _get_one_hot_array(0, 17)\n landlord_down_num_cards_left_batch = np.repeat(\n landlord_down_num_cards_left[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_played_cards = _cards2array([])\n landlord_played_cards_batch = np.repeat(\n landlord_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_up_played_cards = _cards2array([])\n landlord_up_played_cards_batch = np.repeat(\n landlord_up_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n landlord_down_played_cards = _cards2array([])\n landlord_down_played_cards_batch = np.repeat(\n landlord_down_played_cards[np.newaxis, :],\n num_legal_actions, axis=0)\n\n bomb_num = _get_one_hot_bomb(0)\n bomb_num_batch = np.repeat(\n bomb_num[np.newaxis, :],\n num_legal_actions, axis=0)\n\n x_batch = np.hstack((position_info_batch,\n my_handcards_batch,\n other_handcards_batch,\n three_landlord_cards_batch,\n last_action_batch,\n landlord_played_cards_batch,\n landlord_up_played_cards_batch,\n landlord_down_played_cards_batch,\n landlord_num_cards_left_batch,\n landlord_up_num_cards_left_batch,\n landlord_down_num_cards_left_batch,\n bomb_num_batch,\n bid_info_batch,\n multiply_info_batch,\n my_action_batch))\n x_no_action = np.hstack((position_info,\n my_handcards,\n other_handcards,\n three_landlord_cards,\n last_action,\n landlord_played_cards,\n landlord_up_played_cards,\n landlord_down_played_cards,\n landlord_num_cards_left,\n landlord_up_num_cards_left,\n landlord_down_num_cards_left,\n bomb_num))\n z = _action_seq_list2array(_process_action_seq([], 32))\n z_batch = np.repeat(\n z[np.newaxis, :, :],\n num_legal_actions, axis=0)\n obs = {\n 'position': \"\",\n 'x_batch': x_batch.astype(np.float32),\n 'z_batch': z_batch.astype(np.float32),\n 'legal_actions': multiply_info_batch,\n 'x_no_action': x_no_action.astype(np.int8),\n 'z': z.astype(np.int8),\n \"bid_info\": bid_info.astype(np.int8),\n \"multiply_info_batch\": multiply_info.astype(np.int8)\n }\n return obs\n",
"step-ids": [
13,
24,
32,
36,
37
]
}
|
[
13,
24,
32,
36,
37
] |
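# Special vocabulary tokens shared by the embedding utilities below; UNK_IDX
# is the default index assigned to out-of-vocabulary words. The LARGE_*_FLOAT
# constants are sentinel "effectively infinite" values (commonly used, for
# example, to mask out scores before a softmax).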
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<bos>'
EOS_TOKEN = '<eos>'
PAD_TOKEN = '<pad>'
UNK_IDX = 0
LARGE_POSITIVE_FLOAT = 1e+18
LARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT
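# Registries of pretrained embedding archives. Each key is a pretrained
# source name and each value a (file_name, sha1_hash) pair used to verify
# the integrity of the corresponding .npz download.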
GLOVE_NPZ_SHA1 = {'glove.42B.300d': ('glove.42B.300d.npz',
'7deee8f4860744db53ed9e50892effe9883e6d89'), 'glove.6B.100d': (
'glove.6B.100d.npz', '01f80f202fcabcc3e0804898349087bfc191dd1c'),
'glove.6B.200d': ('glove.6B.200d.npz',
'5e6e2bdab346c257f88d80d215d518e680d86e32'), 'glove.6B.300d': (
'glove.6B.300d.npz', '1db264aa936be62f055dfb72854204450bdf4399'),
'glove.6B.50d': ('glove.6B.50d.npz',
'aa16be8d184399d2199f83fd62586f2c30497bfa'), 'glove.840B.300d': (
'glove.840B.300d.npz', 'b4ba390c1154736e07c0e67d9180935f5930e83c'),
'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',
'0f7b82c223451d0002f79ba23596983cdbe0e2b1'), 'glove.twitter.27B.200d':
('glove.twitter.27B.200d.npz',
'41cc2d26f58a54622ce96bf6c8434360ab524f20'), 'glove.twitter.27B.25d': (
'glove.twitter.27B.25d.npz', '9f563d2f296995598cc46812b2fda05ad4c3c879'
), 'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',
'ce9959c056f2a0a780c468feeb4f823af51630e9')}
FAST_TEXT_NPZ_SHA1 = {'crawl-300d-2M': ('crawl-300d-2M.npz',
'9dd611a1fe280c63050cd546d3595400fc0eede4'), 'wiki.aa': ('wiki.aa.npz',
'48f163b80eb37f1806142169d3d4c05cf75b7339'), 'wiki.ab': ('wiki.ab.npz',
'860ceff119dd27e5b701b605879037c1310cbc3e'), 'wiki.ace': (
'wiki.ace.npz', '62938287464040491719f56a6f521f8f808beee8'), 'wiki.ady':
('wiki.ady.npz', '646843afa260d018ed711df3f1ca9c3e000447b6'), 'wiki.af':
('wiki.af.npz', '7b14cd27690b67fea318d0bac2283c16430680e2'), 'wiki.ak':
('wiki.ak.npz', '20f309adad1c45958c97b6055d5838e05bbaea72'), 'wiki.als':
('wiki.als.npz', 'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'), 'wiki.am':
('wiki.am.npz', 'ed3dd10cea64737f7a1623612ee099df9dc19f66'), 'wiki.ang':
('wiki.ang.npz', '8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'), 'wiki.an':
('wiki.an.npz', '168046283c719ab96a29b1abae2e25a6575c7be8'), 'wiki.arc':
('wiki.arc.npz', '049021b7decea4bc009b12936e56b4dbf5b760e7'), 'wiki.ar':
('wiki.ar.npz', '7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'), 'wiki.arz':
('wiki.arz.npz', '7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'), 'wiki.as':
('wiki.as.npz', '01d38c29cd4bd99c1a8534abc058822da14a5b9c'), 'wiki.ast':
('wiki.ast.npz', '9c9846ba5084505a0adea89c95c66e04efbf5ce9'), 'wiki.av':
('wiki.av.npz', '7ef6a920c364638504e673cfde5f7675503fa81e'), 'wiki.ay':
('wiki.ay.npz', 'c1202e110930e3902397f5cb64a8359e013b469f'), 'wiki.azb':
('wiki.azb.npz', '10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'), 'wiki.az':
('wiki.az.npz', '74257c3bcd533a606afae509ea835dc036d61546'), 'wiki.ba':
('wiki.ba.npz', '4a2857ed694d66864df562b376c2fa12fcb03646'), 'wiki.bar':
('wiki.bar.npz', 'e65c6b7e9ff83798d1eea05d166148837d53e615'),
'wiki.bat_smg': ('wiki.bat_smg.npz',
'6420584ae28ba6c9dd145fea8f096243d457c2d8'), 'wiki.bcl': (
'wiki.bcl.npz', '33606c970ab336b678393e2bdb8af2116d11cf7b'), 'wiki.be':
('wiki.be.npz', '84487d341e333344cf71bc12c7a205d923762498'), 'wiki.bg':
('wiki.bg.npz', '56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'), 'wiki.bh':
('wiki.bh.npz', '07473989853a344a41aaa18f41030dc56d0d01c7'), 'wiki.bi':
('wiki.bi.npz', '08adfa3c9ef3016d30ef69ea539d217ff67eda09'), 'wiki.bjn':
('wiki.bjn.npz', '998a551283222931d3a26922308449950bfa3ec7'), 'wiki.bm':
('wiki.bm.npz', '454ff9fbd4790e4a076d9a2087a51da28aa1332f'), 'wiki.bn':
('wiki.bn.npz', '1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'), 'wiki.bo':
('wiki.bo.npz', 'b9fe87318428de0a7790de175b5fec80c5af482d'), 'wiki.bpy':
('wiki.bpy.npz', '5c7853173d27e2c018c24eca69de8d5f34511b0d'), 'wiki.br':
('wiki.br.npz', '7aa66a2034fbfaa1d39e637385d48610238797c9'), 'wiki.bs':
('wiki.bs.npz', 'a019a4677677c2e9e4d899326b2b6c15ad6c011a'), 'wiki.bug':
('wiki.bug.npz', '09ae3477941d7a99d1df494368d7efb0b2c18913'),
'wiki.bxr': ('wiki.bxr.npz', 'b832c691b8ddd95896c052d3d15e1f98d72068d5'
), 'wiki.ca': ('wiki.ca.npz',
'391e0d4daad08649251274fa1cc2a5f49c7728b1'), 'wiki.cbk_zam': (
'wiki.cbk_zam.npz', '02e57a763bc9f9eadaba57953383dd12a0a78a37'),
'wiki.cdo': ('wiki.cdo.npz', 'd6e8f422327e8b2273f1f2662d793707ece6695d'
), 'wiki.ceb': ('wiki.ceb.npz',
'23bc0bb9aeaa57dff35092766941a866de142aae'), 'wiki.ce': ('wiki.ce.npz',
'182b2a889256119a6d379d501c55c7621e5855db'), 'wiki.ch': ('wiki.ch.npz',
'82dd77512fcb463481f43c9cef3507e2baa90d7b'), 'wiki.cho': (
'wiki.cho.npz', 'b0b620fc2442d1a6e2440e71a424861c80175f0c'), 'wiki.chr':
('wiki.chr.npz', '3d62c6b95c5af46abd6234426ae760cca65d5bd0'),
'wiki.chy': ('wiki.chy.npz', '34a28a22da79aebc100e3714b825c95c8d5f54a3'
), 'wiki.ckb': ('wiki.ckb.npz',
'ad19461e4be583d08b7693ff5b1e9d590ed41add'), 'wiki.co': ('wiki.co.npz',
'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'), 'wiki.crh': (
'wiki.crh.npz', '540270ba6edd9d7b2f7efca52b3b407524ac67d1'), 'wiki.cr':
('wiki.cr.npz', 'f06b77465a38ec960d7d5a7554b848c37e945c76'), 'wiki.csb':
('wiki.csb.npz', 'b8b28559cf2541341af98e2aa755856765bdeabf'), 'wiki.cs':
('wiki.cs.npz', '19881e931fe06abf341450f00c342d364313e232'), 'wiki.cu':
('wiki.cu.npz', '731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'), 'wiki.cv':
('wiki.cv.npz', 'e60034fcffb7dfef7b236ddba1194c3aa20b7967'), 'wiki.cy':
('wiki.cy.npz', '5a0fb967b5556f007c0d5065f951a3d3b1c1005a'), 'wiki.da':
('wiki.da.npz', 'd06258014ba2c7450bc2d55edfdf1731433e42e5'), 'wiki.de':
('wiki.de.npz', 'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'), 'wiki.diq':
('wiki.diq.npz', '4f6c77a86b39834a7130419967759afd8cc26b84'),
'wiki.dsb': ('wiki.dsb.npz', 'e74f1d346a8db96987bff0c33ee5f886907c380a'
), 'wiki.dv': ('wiki.dv.npz',
'5d6fe6f0eec2e7704121d5aba03b4edbb28af873'), 'wiki.dz': ('wiki.dz.npz',
'77c639d36d0355b2de5adead7996eae342b852a6'), 'wiki.ee': ('wiki.ee.npz',
'4b5a76127d57515d3e8a76787cdefde5856b754a'), 'wiki.el': ('wiki.el.npz',
'a00bcb97e7898931196a1c69f7a492e5b6202661'), 'wiki.eml': (
'wiki.eml.npz', 'b475d626b3d97e7a68c02827fdc7900599e838c6'), 'wiki.en':
('wiki.en.npz', 'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'), 'wiki.eo':
('wiki.eo.npz', '18049b0010520d13e676f5a82e8bb90153d99003'), 'wiki.es':
('wiki.es.npz', 'a6d192ba7d82d762f8367e75ca951aad4d11e410'), 'wiki.et':
('wiki.et.npz', '4beb7025cf88f1aa62d025b187f0cb09aee61858'), 'wiki.eu':
('wiki.eu.npz', '5e1a8197e35f20a2476798bbb935b4c131289c4f'), 'wiki.ext':
('wiki.ext.npz', '049b2d1b0a8b102b45907cf487cac30aa294e0a0'), 'wiki.fa':
('wiki.fa.npz', '81ed274997c87ef87d73d25e166ca06272ce426f'), 'wiki.ff':
('wiki.ff.npz', '4867dc74cd53ca0b0f769af4fa1ea420406b59bf'), 'wiki.fi':
('wiki.fi.npz', '6d1291b854045179f8171ac7d62ede7d8ac159a2'),
'wiki.fiu_vro': ('wiki.fiu_vro.npz',
'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'), 'wiki.fj': ('wiki.fj.npz',
'cf5c31b0a69276f5dd18ab738ed92444abaeb755'), 'wiki.fo': ('wiki.fo.npz',
'ffc19807d528af000861a94cfb8097bd686e14fc'), 'wiki.fr': ('wiki.fr.npz',
'8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'), 'wiki.frp': (
'wiki.frp.npz', 'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'), 'wiki.frr':
('wiki.frr.npz', 'fa5e5c39ea2a45793c679eacea290a35e37405ea'),
'wiki.fur': ('wiki.fur.npz', 'a61a8940d059f25000e3fe23933e5ed0d37e65d3'
), 'wiki.fy': ('wiki.fy.npz',
'46f9f41bdf6f4fb8e27a753290413d745465963b'), 'wiki.gag': (
'wiki.gag.npz', '49fb01230e6803544122d47ab7d3fe694d1444f2'), 'wiki.gan':
('wiki.gan.npz', '716b7b26acc15975f30caf3c6effa111516fcca5'), 'wiki.ga':
('wiki.ga.npz', 'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'), 'wiki.gd':
('wiki.gd.npz', '597017b5a32d933f194595d3656f858e37e70a62'), 'wiki.glk':
('wiki.glk.npz', '91a5834658bc2d48714e8807ef24efb79567b4b5'), 'wiki.gl':
('wiki.gl.npz', '2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'), 'wiki.gn':
('wiki.gn.npz', 'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'), 'wiki.gom':
('wiki.gom.npz', '8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),
'wiki.got': ('wiki.got.npz', 'd05daf105611150695e61775fdff2c500b36be3f'
), 'wiki.gu': ('wiki.gu.npz',
'0ce175c5fc39bab4032892f70c9d2bb850af0f4a'), 'wiki.gv': ('wiki.gv.npz',
'2c573f873d607831ff01b64603c17b8db79bd7e1'), 'wiki.hak': (
'wiki.hak.npz', 'e6048727799cdf149f5c50037e0fc59300d33a94'), 'wiki.ha':
('wiki.ha.npz', 'f18ea7286bbd390c5470896b2c99cb1adc740064'), 'wiki.haw':
('wiki.haw.npz', '18bcd85d2e06b1b889f0835fc5b62697fdf32d72'), 'wiki.he':
('wiki.he.npz', '76915ff167b6ecb7b7e22ff0ca46914a55d344af'), 'wiki.hif':
('wiki.hif.npz', '12153aaf98d76d5502ab77a27cd0b9a539f61513'), 'wiki.hi':
('wiki.hi.npz', '249666a598991f6ec147954c6af9e531fd1cd94e'), 'wiki.ho':
('wiki.ho.npz', '3f804fd69780c0789708b56ea9d48715f8e38f26'), 'wiki.hr':
('wiki.hr.npz', '9a3de28e69f97048bfb480b4f83eaab6149f66ad'), 'wiki.hsb':
('wiki.hsb.npz', '7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'), 'wiki.ht':
('wiki.ht.npz', 'a607093d511afeb584d02dc676bc5a27eff66287'), 'wiki.hu':
('wiki.hu.npz', '9b2c4750daf1bcf39768572e874b5afda0e2f0bc'), 'wiki.hy':
('wiki.hy.npz', 'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'), 'wiki.hz':
('wiki.hz.npz', '5dfb8afbdae6b4148c3e55ab459c56a74b46b463'), 'wiki.ia':
('wiki.ia.npz', '4cfaaf053b9513bbf5b2423258c0f01d20256de6'), 'wiki.id':
('wiki.id.npz', 'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'), 'wiki.ie':
('wiki.ie.npz', '1bae7256c2e763ce6d692d1c0a603d99a8b22826'), 'wiki.ig':
('wiki.ig.npz', '23128e54a5e143891d392d621723bad9cfc8cf7b'), 'wiki.ii':
('wiki.ii.npz', '54bc16d05da512481865a89ecf30260b0acc04dc'), 'wiki.ik':
('wiki.ik.npz', 'f8015227e893d2375699b7d132b306ba381f02ac'), 'wiki.ilo':
('wiki.ilo.npz', '185a11f81bd5d24a34558dda81ee4735f5ba150b'), 'wiki.io':
('wiki.io.npz', 'ddf8180a90aa6ee5be93a2582cc99c535f21363e'), 'wiki.is':
('wiki.is.npz', '968f8dd2a093b279a6f7aaa734008454bf51d724'), 'wiki.it':
('wiki.it.npz', 'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'), 'wiki.iu':
('wiki.iu.npz', 'fa8896730bd6c24c3473daa22116d1016294e7f7'), 'wiki.jam':
('wiki.jam.npz', 'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'), 'wiki.ja':
('wiki.ja.npz', '8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'), 'wiki.jbo':
('wiki.jbo.npz', '145fc999ab004b348cf9bf445f0a93a7a145308b'), 'wiki.jv':
('wiki.jv.npz', '66978770bf06e42414395cf5fd8c596044d72bec'), 'wiki.kaa':
('wiki.kaa.npz', '624a640ecb9901b2aba2e9f44ab615146ecb2862'),
'wiki.kab': ('wiki.kab.npz', 'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'
), 'wiki.ka': ('wiki.ka.npz',
'1ca8376e1e0cbd58001c1b51a2d488a2874a6743'), 'wiki.kbd': (
'wiki.kbd.npz', 'f2d2a05b06723ac549784ad5470d84f5742a1352'), 'wiki.kg':
('wiki.kg.npz', 'fa7f6d5f660a173a3e75342d449980eedcdc789e'), 'wiki.ki':
('wiki.ki.npz', '21a8c7c616c0050c51c288861f3423f313e4f634'), 'wiki.kj':
('wiki.kj.npz', 'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'), 'wiki.kk':
('wiki.kk.npz', 'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'), 'wiki.kl':
('wiki.kl.npz', 'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'), 'wiki.km':
('wiki.km.npz', 'e053799fd01463808432dc035bef3e36620e2f36'), 'wiki.kn':
('wiki.kn.npz', '2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'), 'wiki.koi':
('wiki.koi.npz', 'a9b02e9bd41833bcd54769f94626019c03f29997'), 'wiki.ko':
('wiki.ko.npz', '764d9896e74b5a26c6884d48bce3bed8ed3a7822'), 'wiki.krc':
('wiki.krc.npz', 'bfe39598c718f1cc95909db7544b3214b308a97c'), 'wiki.kr':
('wiki.kr.npz', '1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'), 'wiki.ksh':
('wiki.ksh.npz', '66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'), 'wiki.ks':
('wiki.ks.npz', '85f1adaa05b854df4dede745a1aaab3836e60770'), 'wiki.ku':
('wiki.ku.npz', 'faf90584e5a45e6d0f9eeb88399b82abe037d584'), 'wiki.kv':
('wiki.kv.npz', '9f2b41822013a412da9c99fac06eed8be03ca192'), 'wiki.kw':
('wiki.kw.npz', '3eed8a8fc97a2fc79241b8474a458c98d00fc897'), 'wiki.ky':
('wiki.ky.npz', '0116ff90f10a6c0728e1ea86d8a44896ea83270a'), 'wiki.lad':
('wiki.lad.npz', '5af2015b3d1c5e8563f0e92721580988ebe2ce50'), 'wiki.la':
('wiki.la.npz', '7143303a3ea13c7668eb90ea6e3d2ca69857a3be'), 'wiki.lbe':
('wiki.lbe.npz', 'f206a3c35a184ba5d2b32ee68640eadf66c847da'), 'wiki.lb':
('wiki.lb.npz', '143dc6337f3690379282034c460c613d7f144923'), 'wiki.lez':
('wiki.lez.npz', 'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'), 'wiki.lg':
('wiki.lg.npz', '866640ce62cedbc1d453b7ea3c289c291ad76e13'), 'wiki.lij':
('wiki.lij.npz', '0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'), 'wiki.li':
('wiki.li.npz', '4666b3c238256d7b7623a136db19b8b9f4754734'), 'wiki.lmo':
('wiki.lmo.npz', 'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'), 'wiki.ln':
('wiki.ln.npz', 'fba158719944aabe58e0002a90be0ed77e11702d'), 'wiki.lo':
('wiki.lo.npz', '1e113e340a8a93d385e14502c9c4e3bcdf6c3101'), 'wiki.lrc':
('wiki.lrc.npz', '42cb755f398fba6f0da7949c91e92b55654bd482'),
'wiki.ltg': ('wiki.ltg.npz', '182f75859e228d1162215f28fe7f2dca127624a4'
), 'wiki.lt': ('wiki.lt.npz',
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'), 'wiki.lv': ('wiki.lv.npz',
'2be8f926da85694fa998bf79d80b61ebb8d67576'), 'wiki.mai': (
'wiki.mai.npz', 'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),
'wiki.map_bms': ('wiki.map_bms.npz',
'6f0394d6b3d08a946e3df4b9355efe94148f018a'), 'wiki.mdf': (
'wiki.mdf.npz', '774ee35334641db57f9ac9069961c5372a5d92e8'), 'wiki.mg':
('wiki.mg.npz', '496c48ef668f08ce95ebb11ce1ce5026b52d935c'), 'wiki.mh':
('wiki.mh.npz', '352edd84f99c5aa277a7306f6cacea1fab065ed3'), 'wiki.mhr':
('wiki.mhr.npz', 'dd78b27a674ac10411cdf74ac32f9391506b17e0'),
'wiki.min': ('wiki.min.npz', '628b406441ab03bc8aa68195ada50bfdc8226f34'
), 'wiki.mi': ('wiki.mi.npz',
'754127b473861cd4f9ae034c9f527a34827b1f00'), 'wiki.mk': ('wiki.mk.npz',
'b09fed4f56c296f13c4020ef1fec498382a38b73'), 'wiki.ml': ('wiki.ml.npz',
'02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'), 'wiki.mn': ('wiki.mn.npz',
'08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'), 'wiki.mo': ('wiki.mo.npz',
'638c2e8bd2352fd52921b9ae62f578b8357bab49'), 'wiki.mrj': (
'wiki.mrj.npz', 'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'), 'wiki.mr':
('wiki.mr.npz', '074dd68c947c2f137a3e84b55012925f00213139'), 'wiki.ms':
('wiki.ms.npz', '3dbe9e9d70251de8a374776ff1250a9c3103ee59'), 'wiki.mt':
('wiki.mt.npz', 'f5103998a68d1b178387417436a83123d44aba01'),
'wiki.multi.ar': ('wiki.multi.ar.npz',
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'), 'wiki.multi.bg': (
'wiki.multi.bg.npz', 'c04018f3a600cee170f12a36cdd35b4727a2aade'),
'wiki.multi.ca': ('wiki.multi.ca.npz',
'eef52a0cf20c133ca9065de25f0702861a8cfa29'), 'wiki.multi.cs': (
'wiki.multi.cs.npz', 'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),
'wiki.multi.da': ('wiki.multi.da.npz',
'24374f2ee169b33327feeee46da31b0de1622fe4'), 'wiki.multi.de': (
'wiki.multi.de.npz', '2e6c119b345bebd34b56eaaf855d6703889b11f7'),
'wiki.multi.el': ('wiki.multi.el.npz',
'9d122beedb80a2e5334946641e5bafd32c01e76b'), 'wiki.multi.en': (
'wiki.multi.en.npz', '8c3c480b4cb2690304173713a646280613b244a8'),
'wiki.multi.es': ('wiki.multi.es.npz',
'483a22656e4fb2a01e9f4ef8156b261e780850ab'), 'wiki.multi.et': (
'wiki.multi.et.npz', '22498c7b91645a3874fa738b5cfb16bf98b6f97c'),
'wiki.multi.fi': ('wiki.multi.fi.npz',
'765a6f0b63777bff4ae6ca2b461c5889c03d6a70'), 'wiki.multi.fr': (
'wiki.multi.fr.npz', 'decd9aacf600114b8a36072535c0309874a37c83'),
'wiki.multi.he': ('wiki.multi.he.npz',
'7eee940c1b85936f59122f4b1a166223dd946674'), 'wiki.multi.hr': (
'wiki.multi.hr.npz', '1673963416af088f8bf15576afb33d58115db35c'),
'wiki.multi.hu': ('wiki.multi.hu.npz',
'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'), 'wiki.multi.id': (
'wiki.multi.id.npz', '6c3e721febb511ede7db7bf978d65769e4270f5c'),
'wiki.multi.it': ('wiki.multi.it.npz',
'fc5bfc11e0165e8d95c1708573dad5e456826c73'), 'wiki.multi.mk': (
'wiki.multi.mk.npz', '6cd50198355674f156fc863108d9bebf11cfabd9'),
'wiki.multi.nl': ('wiki.multi.nl.npz',
'4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'), 'wiki.multi.no': (
'wiki.multi.no.npz', '63756168c1101e73fba8d1a5015f32b8892819e6'),
'wiki.multi.pl': ('wiki.multi.pl.npz',
'958b8e8bead965ba1bb1433e1c960fc3e12a10fb'), 'wiki.multi.pt': (
'wiki.multi.pt.npz', '22f07df1609d79b95344ee575ea43141424a1528'),
'wiki.multi.ro': ('wiki.multi.ro.npz',
'73180b3e382519004bf38ea7b86237aacbbe813a'), 'wiki.multi.ru': (
'wiki.multi.ru.npz', '3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),
'wiki.multi.sk': ('wiki.multi.sk.npz',
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'), 'wiki.multi.sl': (
'wiki.multi.sl.npz', '3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),
'wiki.multi.sv': ('wiki.multi.sv.npz',
'4f1494885b9a831e87cfa3c15f2204c4a73c0779'), 'wiki.multi.tr': (
'wiki.multi.tr.npz', '54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),
'wiki.multi.uk': ('wiki.multi.uk.npz',
'500fd26b1d7a25b42458012e99f9f76642e0c787'), 'wiki.multi.vi': (
'wiki.multi.vi.npz', '3955809cceb300965c15f9372221417719bb0db8'),
'wiki.mus': ('wiki.mus.npz', 'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'
), 'wiki.mwl': ('wiki.mwl.npz',
'8a5e2c272166f8a72c5694ca6c3104d5f49179ec'), 'wiki.my': ('wiki.my.npz',
'5e035aca16700d7d6695af8a6d3a88ac847aaeb7'), 'wiki.myv': (
'wiki.myv.npz', 'd4cfaab70c640033e02c0fc0c5a3615ae836c569'), 'wiki.mzn':
('wiki.mzn.npz', 'ad09ac584ae455b5862b95125ef409360ae18445'),
'wiki.nah': ('wiki.nah.npz', '2dc454ef37d059f2053af46cfa1f4f0ca939cba0'
), 'wiki.na': ('wiki.na.npz',
'401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'), 'wiki.nap': (
'wiki.nap.npz', '996da46aeeab5644ba766d00c5e343b1553361d7'),
'wiki.nds_nl': ('wiki.nds_nl.npz',
'5a9307e16b13a5a82ec19a52b33254537e7198e7'), 'wiki.nds': (
'wiki.nds.npz', 'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'), 'wiki.ne':
('wiki.ne.npz', 'a601db2647a74ffd2b4b43dcb8584735f555459c'), 'wiki.new':
('wiki.new.npz', 'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),
'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',
'0a03bbd508e5381e140476140fb121afeb0050ed'),
'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',
'69edae21375407781c727dcb9e534e79d712d137'), 'wiki.ng': ('wiki.ng.npz',
'befd774d15f69d43547e13e5ea3a97c4cb1ab405'), 'wiki.nl': ('wiki.nl.npz',
'5a7cb6f1dd0a7621202abba9461ac2c5bf905219'), 'wiki.nn': ('wiki.nn.npz',
'8e5059ddeb24050fadaa5cc4622b13feb3e4a226'), 'wiki.no': ('wiki.no.npz',
'5ce6e0f793e66f081652f64013968099de03d9f9'), 'wiki.nov': (
'wiki.nov.npz', '95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'), 'wiki.vec':
('wiki.vec.npz', '08ebb912efeb9df1c7d05e1af90484d210dff47e'),
'wiki.nrm': ('wiki.nrm.npz', 'e58614b4508ff9810f0b58fd818f973775bc918d'
), 'wiki.nso': ('wiki.nso.npz',
'56a2ebe260241402d117cd89c5c872b9c96ff05b'), 'wiki.nv': ('wiki.nv.npz',
'c713051fe03ec1f60314bb42161b2a47fb5e169a'), 'wiki.ny': ('wiki.ny.npz',
'ba5a1725955cbc13e7fd93ab499f8085840c992c'), 'wiki.oc': ('wiki.oc.npz',
'259e7d994c38a4cfc140fb07016b82d6781e5027'), 'wiki.olo': (
'wiki.olo.npz', '0fea70f887def4779ee70a79366b88f1ada65004'), 'wiki.om':
('wiki.om.npz', '47e2d756b5f8913085d901375c1b4e0b118a4221'), 'wiki.or':
('wiki.or.npz', '7e274ab060219b019aa02bb97941cc6e162fd01f'), 'wiki.os':
('wiki.os.npz', '19e8199cc2aaffdb07b6c558dbc5465ac6e03155'), 'wiki.pag':
('wiki.pag.npz', 'eddf4931547649026c02f893297ef673ec6158bb'),
'wiki.pam': ('wiki.pam.npz', '40109aa174bd9f0fa657839bb548e2b0646c58d3'
), 'wiki.pa': ('wiki.pa.npz',
'8a5870717e9e641b1f757f13259171698118de2e'), 'wiki.pap': (
'wiki.pap.npz', '999c8e5b005ca20d9998fbbe4fa79177f69e24c0'), 'wiki.pcd':
('wiki.pcd.npz', 'e975066b323a65cdc5e4c27138ef674d2cf7250b'),
'wiki.pdc': ('wiki.pdc.npz', '5c770b9d56f276b0aa535845f175c05ee1cea615'
), 'wiki.pfl': ('wiki.pfl.npz',
'0063d0b633ee529a75482b36ed4f4da7d64994ec'), 'wiki.pih': (
'wiki.pih.npz', 'ce1d76c94d248545eea0d7436c54849dbb380bfc'), 'wiki.pi':
('wiki.pi.npz', 'c7d56c334bf529f8b3655693d207a80feaec4aed'), 'wiki.pl':
('wiki.pl.npz', '0d612fdf871a1a4084c867f394940475be899443'), 'wiki.pms':
('wiki.pms.npz', 'ca149a2fb138011315bb6d5d61c7a5647e515e51'),
'wiki.pnb': ('wiki.pnb.npz', '9ec82d02ad8894056c67991cf8ce927bcca74ee2'
), 'wiki.pnt': ('wiki.pnt.npz',
'3f90123407bb8fc838a0a0d3700a14e15f5b26aa'), 'wiki.ps': ('wiki.ps.npz',
'7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'), 'wiki.pt': ('wiki.pt.npz',
'f172fd801edd1ad9d319ba44146d40b5d682a473'), 'wiki.qu': ('wiki.qu.npz',
'68bec60ccfe1826c3b3a8968574488dbc74cdf7b'), 'wiki.rm': ('wiki.rm.npz',
'00fb191fc736ba60cb23e76169dfccde9a9daad0'), 'wiki.rmy': (
'wiki.rmy.npz', 'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'), 'wiki.rn':
('wiki.rn.npz', '57b8e0d6999269be227af6ef2797a9cf8386ff1b'),
'wiki.roa_rup': ('wiki.roa_rup.npz',
'e06d6b5672a59bb9e83143bc8b28300d23c09546'), 'wiki.roa_tara': (
'wiki.roa_tara.npz', 'c083105f40236dc3711f06c1b40e8ee7a714b99d'),
'wiki.ro': ('wiki.ro.npz', '766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),
'wiki.rue': ('wiki.rue.npz', '9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'
), 'wiki.ru': ('wiki.ru.npz',
'd59d099481c22d5592ab9635c9ee48060aa0bf45'), 'wiki.rw': ('wiki.rw.npz',
'e99ee87d249f6c157c5c97397d1025d798b85c69'), 'wiki.sah': (
'wiki.sah.npz', '85dae39097b29bc8e2b64f343a77794e4a62f91a'), 'wiki.sa':
('wiki.sa.npz', '7d1928d7c67400045ac1b35a37a0e3089690d875'), 'wiki.scn':
('wiki.scn.npz', '27d7b8050bbeed8ce196061c610216760b053c39'), 'wiki.sc':
('wiki.sc.npz', '69c7b8be0f03a1bbd615695f93bdd78f96a58e16'), 'wiki.sco':
('wiki.sco.npz', '4880282f59d3338b67fbff75359e2d24896e95bb'), 'wiki.sd':
('wiki.sd.npz', '0ed8da4d27223db717a612cf0c88582351db6e19'), 'wiki.se':
('wiki.se.npz', '0f4b2e060d5e29f96ca73aab29c967e79db69c17'), 'wiki.sg':
('wiki.sg.npz', 'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'), 'wiki.sh':
('wiki.sh.npz', 'c13f1e94676bc939560193f7aa7ffd7d604707b3'),
'wiki.simple': ('wiki.simple.npz',
'352d0575e7d60b08e1dfce2c5de713906f0ed78f'), 'wiki.si': ('wiki.si.npz',
'204f9ffbe7770a9f56d3b2fb26999165015f5c33'), 'wiki.sk': ('wiki.sk.npz',
'7a9820b5a343b242660bf2595d1ecbf6e00a76d6'), 'wiki.sl': ('wiki.sl.npz',
'85f3186f26d6725317a64e290363a7251b928b81'), 'wiki.sm': ('wiki.sm.npz',
'9e13452cc4bff677f4f15db04f9d2f95f6ec054c'), 'wiki.sn': ('wiki.sn.npz',
'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'), 'wiki.so': ('wiki.so.npz',
'0f5d71b95768b33fd939a870c15344c4478364a9'), 'wiki.sq': ('wiki.sq.npz',
'8b05826df8575e65c87a2fc0b7630cf644d4216d'), 'wiki.srn': (
'wiki.srn.npz', '2711396ef297ac5dde8904508bc002bdecbcc6f4'), 'wiki.sr':
('wiki.sr.npz', '546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'), 'wiki.ss':
('wiki.ss.npz', '2e5911bad79bb5270a64f587e326d31c95ec58f3'), 'wiki.st':
('wiki.st.npz', '23bc954719a2962e891f02efaea754c9ea025894'), 'wiki.stq':
('wiki.stq.npz', 'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'), 'wiki.su':
('wiki.su.npz', '7e48732e8a1fcf212e692924a4416a6ac3b3b055'), 'wiki.sv':
('wiki.sv.npz', 'b9ec52e9423688f195f3145c243226c0e0b51e83'), 'wiki.sw':
('wiki.sw.npz', '5262f0c645322b10eca73f792a970f10b2719e55'), 'wiki.szl':
('wiki.szl.npz', 'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'), 'wiki.ta':
('wiki.ta.npz', 'da7c5bc6e1142306ff2669bf1739832beb6c1763'), 'wiki.tcy':
('wiki.tcy.npz', 'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'), 'wiki.te':
('wiki.te.npz', 'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'), 'wiki.tet':
('wiki.tet.npz', '11e46a893af55344dbe102d530fdfea5d949d3bc'), 'wiki.tg':
('wiki.tg.npz', 'da66abb72ec9ccc602713161e544963d59cc51d7'), 'wiki.th':
('wiki.th.npz', '25e54bf2d305779ec9baa5f344410bd75c7702fc'), 'wiki.ti':
('wiki.ti.npz', '1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'), 'wiki.tk':
('wiki.tk.npz', '34c714fa8275fd6abfe86b2d144a043774552a6c'), 'wiki.tl':
('wiki.tl.npz', '7d7f8a0485155bce7a74a1d778824375b0029f53'), 'wiki.tn':
('wiki.tn.npz', 'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'), 'wiki.to':
('wiki.to.npz', 'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'), 'wiki.tpi':
('wiki.tpi.npz', '448cef043fa4b7f97825dbf8ee205ef05543bcac'), 'wiki.tr':
('wiki.tr.npz', 'c9830607a4c5134c6191006f1d80bae0ec798fe6'), 'wiki.ts':
('wiki.ts.npz', '84a0598803712c8a713943447ddb73fc0f39af43'), 'wiki.tt':
('wiki.tt.npz', '82c29df18f33e6284af3e977a6dda7e132a7a225'), 'wiki.tum':
('wiki.tum.npz', '358990b894a3fb09d70674465952d828c9b0eda7'), 'wiki.tw':
('wiki.tw.npz', '1e6d2838a4f271c1808795fb929cfcbf95094d93'), 'wiki.ty':
('wiki.ty.npz', 'e41ca5192d8cb515b3561c8d6935b150deb027b7'), 'wiki.tyv':
('wiki.tyv.npz', 'ce062ed32e854604714b65698ae290c99ba28060'),
'wiki.udm': ('wiki.udm.npz', '9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'
), 'wiki.ug': ('wiki.ug.npz',
'656503e54063e200980e39f00fc011395bcd8551'), 'wiki.uk': ('wiki.uk.npz',
'352b7ee24d9fc6513fff4fe13bc04086c680834a'), 'wiki.ur': ('wiki.ur.npz',
'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'), 'wiki.uz': ('wiki.uz.npz',
'd60d1e67bb8574dd71c18c88114aba674fc1eecb'), 'wiki.ve': ('wiki.ve.npz',
'5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'), 'wiki.vep': (
'wiki.vep.npz', '7a94355754fbe56802242c0bf9d7a27335095552'), 'wiki.vi':
('wiki.vi.npz', 'f118039eb16a4ca3347b6b171eac41113350a041'), 'wiki.vls':
('wiki.vls.npz', '9a46a2fdc6448aa54f212081643745499ea7d05c'), 'wiki.vo':
('wiki.vo.npz', '8e2f93c85ac608bcc4ae14093b9ff016061378fb'), 'wiki.wa':
('wiki.wa.npz', '907074f7743d30cdbb2c48d0c8b4040796ea4164'), 'wiki.war':
('wiki.war.npz', '928fb410c394b9c18d875326b6a3e750e2611e1b'), 'wiki.wo':
('wiki.wo.npz', '7bb352be44f7261aa926f49b13e77df30f29312f'), 'wiki.wuu':
('wiki.wuu.npz', '0d1dc7b05867ff2156a1180ad3da3b4697924e59'),
'wiki.xal': ('wiki.xal.npz', 'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'
), 'wiki.xh': ('wiki.xh.npz',
'c64e1d2e77d1c744a628e2bd7353284616e48bea'), 'wiki.xmf': (
'wiki.xmf.npz', '160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'), 'wiki.yi':
('wiki.yi.npz', '0662542cee29f3392fc905004ac6443b32c1477c'), 'wiki.yo':
('wiki.yo.npz', '5d12d3b902a1fa19d8548295c3802c0608afa5c8'), 'wiki.za':
('wiki.za.npz', '536348ff89df62e968739b567a1245bfd4112fbe'), 'wiki.zea':
('wiki.zea.npz', '61fa192289a7c0f73ffa8035632a38b91c31c224'),
'wiki.zh_classical': ('wiki.zh_classical.npz',
'9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'), 'wiki.zh_min_nan': (
'wiki.zh_min_nan.npz', '5d38bc025c82af578299d60f7df7b399de6ed81a'),
'wiki.zh': ('wiki.zh.npz', '94007fcf3b105bf2c21b84a3a22bdb7946e74804'),
'wiki.zh_yue': ('wiki.zh_yue.npz',
'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'), 'wiki.zu': ('wiki.zu.npz',
'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}
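# A minimal usage sketch (this helper is an assumption, not part of the
# original module): look up a pretrained source in one of the registries
# above to get its (file_name, sha1_hash) pair, then verify the downloaded
# archive before loading it.
import hashlib


def _sha1_matches(path, expected_sha1, chunk_size=1 << 20):
    """Return True if the SHA-1 digest of the file at `path` equals `expected_sha1`."""
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        # Stream in 1 MiB chunks so large embedding archives need not fit in memory.
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha1
# Example: file_name, sha1 = GLOVE_NPZ_SHA1['glove.6B.50d'], then
# _sha1_matches(file_name, sha1) should hold for a good download.

# Evaluation-dataset metadata follows. GOOGLEANALOGY_CATEGORIES lists the
# section names of the Google word-analogy test set: five semantic
# categories followed by nine gram* syntactic ones.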
GOOGLEANALOGY_CATEGORIES = ['capital-common-countries', 'capital-world',
'currency', 'city-in-state', 'family', 'gram1-adjective-to-adverb',
'gram2-opposite', 'gram3-comparative', 'gram4-superlative',
'gram5-present-participle', 'gram6-nationality-adjective',
'gram7-past-tense', 'gram8-plural', 'gram9-plural-verbs']
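# SHA-1 checksums for the individual category files of the BATS 3.0 analogy
# dataset, keyed by file path inside the archive.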
BATS_CHECKSUMS = {
'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',
'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':
'44dbc56432b79ff5ce2ef80b6840a8aa916524f9',
'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':
'dc530918e98b467b8102a7dab772a66d3db32a73',
'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':
'6c6fdfb6c733bc9b298d95013765163f42faf6fb',
'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':
'39fa47ec7238ddb3f9818bc586f23f55b55418d8',
'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':
'8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',
'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':
'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',
'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':
'5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',
'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':
'377777c1e793c638e72c010228156d01f916708e',
'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':
'051c0c3c633e10900f827991dac14cf76da7f022',
'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':
'5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',
'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':
'80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',
'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':
'223e120bd61b3116298a253f392654c15ad5a39a',
'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':
'a56f8685af489bcd09c36f864eba1657ce0a7c28',
'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':
'5da99b1f1781ecfb4a1a7448c715abf07451917b',
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':
'4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',
'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':
'a6218162bc257d98e875fc667c23edfac59e19fd',
'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':
'9a4236c3bbc23903e101a42fb5ad6e15e552fadf',
'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':
'3ab0153926d5cf890cf08a4077da6d9946133874',
'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':
'2a012b87a9a60e128e064c5fe24b60f99e16ddce',
'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':
'9890315d3c4e6a38b8ae5fc441858564be3d3dc4',
'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':
'ef08a00e8ff7802811ace8f00fabac41b5d03678',
'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':
'754957101c93a25b438785bd4458404cd9010259',
'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':
'71a6562c34fb6154992a7c3e499375fcc3529c96',
'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':
'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',
'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':
'12d5b51c7b76b9136eadc719abc8cf4806c67b73',
'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':
'91991b007a35f45bd42bd7d0d465c6f8311df911',
'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':
'e5af11e216db392986ba0cbb597d861066c29adb',
'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':
'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',
'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':
'247a588671bc1da8f615e14076bd42573d24b4b3',
'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':
'4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',
'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':
'83d5ecad78d9de28fd70347731c7ee5918ba43c9',
'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':
'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',
'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':
'c081e1104e1b40725063f4b39d13d1ec12496bfd',
'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':
'bcbf05f3be76cef990a74674a9999a0bb9790a07',
'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':
'2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',
'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':
'8fa287860b096bef004fe0f6557e4f686e3da81a',
'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':
'a17c591961bddefd97ae5df71f9d1559ce7900f4',
'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':
'117fbb86504c192b33a5469f2f282e741d9c016d',
'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':
'3cde2f2c2a0606777b8d7d11d099f316416a7224'}
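# Human-readable names for the BATS category codes (I* = inflectional
# morphology, D* = derivational morphology, E* = encyclopedic semantics,
# L* = lexicographic semantics).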
BATS_CATEGORIES = {'I01': '[noun - plural_reg]', 'I02':
'[noun - plural_irreg]', 'I03': '[adj - comparative]', 'I04':
'[adj - superlative]', 'I05': '[verb_inf - 3pSg]', 'I06':
'[verb_inf - Ving]', 'I07': '[verb_inf - Ved]', 'I08':
'[verb_Ving - 3pSg]', 'I09': '[verb_Ving - Ved]', 'I10':
'[verb_3pSg - Ved]', 'D01': '[noun+less_reg]', 'D02': '[un+adj_reg]',
'D03': '[adj+ly_reg]', 'D04': '[over+adj_reg]', 'D05': '[adj+ness_reg]',
'D06': '[re+verb_reg]', 'D07': '[verb+able_reg]', 'D08':
'[verb+er_irreg]', 'D09': '[verb+tion_irreg]', 'D10':
'[verb+ment_irreg]', 'E01': '[country - capital]', 'E02':
'[country - language]', 'E03': '[UK_city - county]', 'E04':
'[name - nationality]', 'E05': '[name - occupation]', 'E06':
'[animal - young]', 'E07': '[animal - sound]', 'E08':
'[animal - shelter]', 'E09': '[things - color]', 'E10':
'[male - female]', 'L01': '[hypernyms - animals]', 'L02':
'[hypernyms - misc]', 'L03': '[hyponyms - misc]', 'L04':
'[meronyms - substance]', 'L05': '[meronyms - member]', 'L06':
'[meronyms - part]', 'L07': '[synonyms - intensity]', 'L08':
'[synonyms - exact]', 'L09': '[antonyms - gradable]', 'L10':
'[antonyms - binary]'}
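# SHA-1 checksums for the SemEval-2017 Task 2 data (multilingual and
# cross-lingual word similarity), covering the trial and test splits of
# both subtasks plus the scorer and sample outputs.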
SEMEVAL17_CHECKSUMS = {'SemEval17-Task2/README.txt':
'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',
'SemEval17-Task2/task2-scorer.jar':
'145ef73ce955656d59e3b67b41f8152e8ee018d8',
'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':
'6fc840f989d2274509549e472a68fb88dd2e149f',
'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':
'05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',
'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':
'552904b5988f9951311290ca8fa0441dd4351d4b',
'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':
'29d5970feac5982961bd6ab621ba31f83d3bff77',
'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':
'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',
'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':
'c51463460495a242cc726d41713c5e00b66fdd18',
'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':
'2d2bb2ed41308cc60e7953cc9036f7dc89141b48',
'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':
'a5842ff17fe3847d15414924826a8eb236018bcc',
'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':
'717bbe035d8ae2bad59416eb3dd4feb7238b97d4',
'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':
'a342b950109c73afdc86a7829e17c1d8f7c482f0',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':
'ef92b1375762f68c700e050d214d3241ccde2319',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':
'17aa103981f3193960309bb9b4cc151acaf8136c',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':
'eced15e8565689dd67605a82a782d19ee846222a',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':
'5cb69370a46385a7a3d37cdf2018744be77203a0',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':
'402f7fed52b60e915fb1be49f935395488cf7a7b',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':
'9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':
'd3b37aac79ca10311352309ef9b172f686ecbb80',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':
'a2959aec346c26475a4a6ad4d950ee0545f2381e',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':
'ca627c30143d9f82a37a8776fabf2cee226dd35c',
'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':
'a03d79a6ce7b798356b53b4e85dbe828247b97ef',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':
'7564130011d38daad582b83135010a2a58796df6',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':
'c9e23c2e5e970e7f95550fbac3362d85b82cc569',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':
'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':
'428dfdad2a144642c13c24b845e6b7de6bf5f663',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':
'1dd7ab08a10552486299151cdd32ed19b56db682',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':
'17451ac2165aa9b695dae9b1aba20eb8609fb400',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':
'5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':
'8c09a219670dc32ab3864078bf0c28a287accabc',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':
'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',
'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':
'e0b560bb1d2db39ce45e841c8aad611734dc94f1',
'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':
'dd071fd90f59bec8d271a447d86ee2e462941f52',
'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':
'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',
'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':
'8956c78ff9ceae1d923a57816e55392c6a7dfc49',
'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':
'2f7c4247cde0d918b3508e90f6b49a1f5031c81b',
'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':
'c11e0b5b55f94fc97c7b11fa455e71b071be879f',
'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':
'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',
'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':
'693cb5928e807c79e39136dc0981dadca7832ae6',
'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':
'8241ca66bf5ba55f77607e9bcfae8e34902715d8',
'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':
'd30701a93c8c5500b82ac2334ed8410f9a23864b',
'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':
'bad225573e1216ba8b35429e9fa520a20e8ce031',
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt'
: 'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt'
: 'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt'
: 'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt'
: 'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt'
: 'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':
'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':
'88a6f6dd1bba309f7cae7281405e37f442782983',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':
'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':
'128d1a460fe9836b66f0fcdf59455b02edb9f258',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':
'508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':
'1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':
'141c83d591b0292016583d9c23a2cc5514a006aa',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':
'a0a548cd698c389ee80c34d6ec72abed5f1625e5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':
'8d42bed8a43ff93d26ca95794758d9392ca707ed',
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':
'9c85223f1f734de61c28157df0ce417bb0537803',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':
'126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':
'1db6201c2c8f19744c39dbde8bd4a803859d64c1',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':
'5300bf2ead163ff3981fb41ec5d0e291c287c9e0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':
'd4f5205de929bb0c4020e1502a3f2204b5accd51',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':
'3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':
'c14de7bf326907336a02d499c9b92ab229f3f4f8',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':
'3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':
'359f69e9dfd6411a936baa3392b8f05c398a7707',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':
'44090607fabe5a26926a384e521ef1317f6f00d0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':
'97b09ffa11803023c2143fd4a4ac4bbc9775e645',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt'
: 'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt'
: 'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt'
: 'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt'
: 'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt'
: 'a0735361a692be357963959728dacef85ea08240'}
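# Universal Dependencies 2.1 treebank files: language (or language_treebank)
# code -> split name -> (CoNLL-U file_name, sha1_hash).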
UD21_DATA_FILE_SHA1 = {'af': {'dev': ('af-ud-dev.conllu',
'e37b104f4425ee00afc81779201816d5ac525194'), 'test': (
'af-ud-test.conllu', 'd2bf02370d308ee957c04242bd0871db0e488389'),
'train': ('af-ud-train.conllu',
'a652c7b19c236063d3ea489947f83095893b699a')}, 'grc_proiel': {'dev': (
'grc_proiel-ud-dev.conllu', 'd199530c7e40ff0214e510957bb126af0dc12c1c'),
'test': ('grc_proiel-ud-test.conllu',
'bb7825ddeb18fc2d86638e4725f04563f3e08aab'), 'train': (
'grc_proiel-ud-train.conllu',
'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')}, 'grc': {'dev': (
'grc-ud-dev.conllu', 'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),
'test': ('grc-ud-test.conllu',
'f19accf31db95e2c736d716d3438c09aa877eb07'), 'train': (
'grc-ud-train.conllu', 'e98d3eabea67787c5d43a498f5a0fa4246f38104')},
'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',
'b740de9bd68e68b30b9b313eb050d44e94470ca5'), 'test': (
'ar_nyuad-ud-test.conllu', 'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),
'train': ('ar_nyuad-ud-train.conllu',
'd065f03958fd8782a7431b6778c6665ad09444a6')}, 'ar_pud': {'test': (
'ar_pud-ud-test.conllu', '2161701e6726b6feb14733a312fba6160b9eb722')},
'ar': {'dev': ('ar-ud-dev.conllu',
'5f8964974d5ba5eb3504cdafb93c34c473c4177c'), 'test': (
'ar-ud-test.conllu', '58df161047f310cc3bb4d0e615ca33466e630bb9'),
'train': ('ar-ud-train.conllu',
'0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')}, 'eu': {'dev': (
'eu-ud-dev.conllu', '3ee15b5ed46ec93d7278c8cc0351d242417d553d'), 'test':
('eu-ud-test.conllu', 'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),
'train': ('eu-ud-train.conllu',
'd56ec997916e38ee6ab1badd78c119e81e4797c9')}, 'be': {'dev': (
'be-ud-dev.conllu', '015473e91cf8937c46e8b721f206415abac16a35'), 'test':
('be-ud-test.conllu', 'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),
'train': ('be-ud-train.conllu',
'26b871e28d2f356a709f106b6e3e86b417ba74e7')}, 'bg': {'dev': (
'bg-ud-dev.conllu', '0a2284b10547681eb65691eb2a9f0f1662e16e90'), 'test':
('bg-ud-test.conllu', '75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),
'train': ('bg-ud-train.conllu',
'd4b2fa267010c4486885c91f3af65ff66c8be94c')}, 'bxr': {'sample': (
'bxr-ud-sample.conllu', '9239bdd251a60820c71111ec54de9e7d58a8579d'),
'test': ('bxr-ud-test.conllu',
'0a06e527454ae0b547153222f67eb5db94e528fd')}, 'yue': {'test': (
'yue-ud-test.conllu', 'd91477c65aa75cd45489cca13f7a122066972bdb')},
'ca': {'dev': ('ca-ud-dev.conllu',
'5737824f0afff0d07a43db331f102d62c6da2d96'), 'test': (
'ca-ud-test.conllu', '0e28bd2a3b982515c1158194ad52bcbbe741e170'),
'train': ('ca-ud-train.conllu',
'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')}, 'zh_cfl': {'test': (
'zh_cfl-ud-test.conllu', '32fe45cd0e4e11ced95202971bce74acbc6a8c30')},
'zh_hk': {'test': ('zh_hk-ud-test.conllu',
'4c75fa5bbcdcb181447b4e037224d50feb2776fb')}, 'zh_pud': {'test': (
'zh_pud-ud-test.conllu', 'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},
'zh': {'dev': ('zh-ud-dev.conllu',
'34d8253b35ad2245d59ddffa71b5689ef267b6b2'), 'test': (
'zh-ud-test.conllu', '0f00516097650c12262298dd0fbd1b17a6d2bfe2'),
'train': ('zh-ud-train.conllu',
'9444eec5f4561f289ad140e47e49013689512a65')}, 'cop': {'dev': (
'cop-ud-dev.conllu', '863d1004df1a92df52515105f6fae6ff68539595'),
'test': ('cop-ud-test.conllu',
'd3b33566679f071d4ad622ad840cd98381835706'), 'train': (
'cop-ud-train.conllu', '33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},
'hr': {'dev': ('hr-ud-dev.conllu',
     '8da2a419980807d2e91e09b6bf496e58d442b0ba'),
     'test': ('hr-ud-test.conllu', '49d673cba3d32d39d413e557276a45a0214ed83e'),
     'train': ('hr-ud-train.conllu', 'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},
 'cs_cac': {
     'dev': ('cs_cac-ud-dev.conllu', '69dfed28c29146b41a3428f4715bde70a6aecf00'),
     'test': ('cs_cac-ud-test.conllu', 'a994b33ebbde486c1818a9df460fb112055e95de'),
     'train': ('cs_cac-ud-train.conllu', '694f8559471dc481612606bf5df078daa094a84e')},
 'cs_cltt': {
     'dev': ('cs_cltt-ud-dev.conllu', 'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),
     'test': ('cs_cltt-ud-test.conllu', 'a8f6696785e658471f759bc736b738a105cba9a3'),
     'train': ('cs_cltt-ud-train.conllu', 'ab97886066bfa462e5da03d25f802489292c0b56')},
 'cs_fictree': {
     'dev': ('cs_fictree-ud-dev.conllu', 'dc67c07737a3a8bf2633068941f2d55f1500e192'),
     'test': ('cs_fictree-ud-test.conllu', '06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),
     'train': ('cs_fictree-ud-train.conllu', 'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},
 'cs_pud': {'test': ('cs_pud-ud-test.conllu', '9f205677041de694157ba2ef3e1eadb44d467f2f')},
 'cs': {
     'dev': ('cs-ud-dev.conllu', 'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),
     'test': ('cs-ud-test.conllu', '34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),
     'train': ('cs-ud-train.conllu', 'd1f855798a29d433b580d01ade0d8d062cd58534')},
 'da': {
     'dev': ('da-ud-dev.conllu', '2c0c798c20a2efb30273172d388342a82bb0ce3c'),
     'test': ('da-ud-test.conllu', '85a95a8527f8773f1575ceaf0ab51f204b211047'),
     'train': ('da-ud-train.conllu', 'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},
 'nl_lassysmall': {
     'dev': ('nl_lassysmall-ud-dev.conllu', '2a169af74c2206c9073c3932b4a300492a314ee5'),
     'test': ('nl_lassysmall-ud-test.conllu', '39f08896a40ad370f2acc37d58689cdc43a660a9'),
     'train': ('nl_lassysmall-ud-train.conllu', 'e4fd6bac246c81bb17a3c932e251b8662739cc19')},
 'nl': {
     'dev': ('nl-ud-dev.conllu', '33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),
     'test': ('nl-ud-test.conllu', '01b3e1048792c851fdd59882c353fcdb76dc165e'),
     'train': ('nl-ud-train.conllu', '8e6a10152b7d09ce61433dd5f715ab2401611cf6')},
 'en_lines': {
     'dev': ('en_lines-ud-dev.conllu', '83b63b7670ea4394b558bc26e16a004339f0a0ef'),
     'test': ('en_lines-ud-test.conllu', 'ccc9d3c71a873313d138c3adb12405a97eb270d8'),
     'train': ('en_lines-ud-train.conllu', 'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},
 'en_pud': {'test': ('en_pud-ud-test.conllu', '4a9c83ba058a7e51979af790ba0440cc274b948f')},
 'en_partut': {
     'dev': ('en_partut-ud-dev.conllu', '863a6f571158acaaca95223e50bd08fc0c1134f0'),
     'test': ('en_partut-ud-test.conllu', '0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),
     'train': ('en_partut-ud-train.conllu', 'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},
 'en': {
     'dev': ('en-ud-dev.conllu', 'e2159dda4400d289ad8a403b466c8d23d733ba35'),
     'test': ('en-ud-test.conllu', 'bd36ef23f76155625b379d063427bd62f19b7658'),
     'train': ('en-ud-train.conllu', '993c44f62104971fe2d056847349facbb7986258')},
 'et': {
     'dev': ('et-ud-dev.conllu', '312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),
     'test': ('et-ud-test.conllu', 'd70907f0771b41a27406672b9d91043a0954f946'),
     'train': ('et-ud-train.conllu', 'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},
 'fi_ftb': {
     'dev': ('fi_ftb-ud-dev.conllu', '552ec574acdb3209e7545af4e16a43a1e2956979'),
     'test': ('fi_ftb-ud-test.conllu', '13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),
     'train': ('fi_ftb-ud-train.conllu', '73d025250bfc82a24181b5ed601dc4ae7c8e846c')},
 'fi_pud': {'test': ('fi_pud-ud-test.conllu', '4ab7b0d99ce6697d79732e401be97585a28c2afa')},
 'fi': {
     'dev': ('fi-ud-dev.conllu', 'e023cf7eaffbda20bd4518d87fe9086207bb5361'),
     'test': ('fi-ud-test.conllu', 'fd57c5106e43994250f4472890572bdbb8b4a48b'),
     'train': ('fi-ud-train.conllu', 'ab27bda8cbb62886196b78de87985a4c6cf8215d')},
 'fr_ftb': {
     'dev': ('fr_ftb-ud-dev.conllu', '71b3cc02601f64711f98e33a6b2af10aa00700be'),
     'test': ('fr_ftb-ud-test.conllu', '723b8c44e74202a18b7e71268b738a5e1aa15f86'),
     'train': ('fr_ftb-ud-train.conllu', '9a347120478254647deb7c7e02871b28aad23ec4')},
 'fr_pud': {'test': ('fr_pud-ud-test.conllu', '570b7e31dc359ed62123bea6546efa13cfc2cf25')},
 'fr_partut': {
     'dev': ('fr_partut-ud-dev.conllu', '1505030048829a8dccc466cc86bca057996301ae'),
     'test': ('fr_partut-ud-test.conllu', 'f6446317c9f82cc0b70a76be75282804a3359ac0'),
     'train': ('fr_partut-ud-train.conllu', 'f87c246cfa91186b90c7780cb64783034f196622')},
 'fr_sequoia': {
     'dev': ('fr_sequoia-ud-dev.conllu', '859b10d80c7b3a382571cce9b2620039673539d1'),
     'test': ('fr_sequoia-ud-test.conllu', 'be0ef69e392e64030414748da2995433f23e033d'),
     'train': ('fr_sequoia-ud-train.conllu', '48ac01913518888a32670a687123ed1bac57e0e9')},
 'fr': {
     'dev': ('fr-ud-dev.conllu', '5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),
     'test': ('fr-ud-test.conllu', 'd20a014acd38193155a33a5233c13f89541c78c3'),
     'train': ('fr-ud-train.conllu', 'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},
 'gl_treegal': {
     'dev': ('gl_treegal-ud-dev.conllu', '272558614cff4a5e1f2805626904e6dc488b8d25'),
     'test': ('gl_treegal-ud-test.conllu', '18d99474d3aa9c83878c42a79d7881330dd9b861'),
     'train': ('gl_treegal-ud-train.conllu', 'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},
 'gl': {
     'dev': ('gl-ud-dev.conllu', 'e72390dce9bf973442deef31ed0cd7a975361fe5'),
     'test': ('gl-ud-test.conllu', '7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),
     'train': ('gl-ud-train.conllu', 'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},
 'de_pud': {'test': ('de_pud-ud-test.conllu', '2c91e42b7345145290b68385ff5270910048b8c4')},
 'de': {
     'dev': ('de-ud-dev.conllu', '9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),
     'test': ('de-ud-test.conllu', '48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),
     'train': ('de-ud-train.conllu', '04a1d6a6a2da9d9c38496118e0432c9a6720db64')},
 'got': {
     'dev': ('got-ud-dev.conllu', '501c47193ca2af5826e4afcc04941df87a7c47c3'),
     'test': ('got-ud-test.conllu', 'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),
     'train': ('got-ud-train.conllu', 'b4951ede89d947c6617df782ac248566235f78fb')},
 'el': {
     'dev': ('el-ud-dev.conllu', '9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),
     'test': ('el-ud-test.conllu', '1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),
     'train': ('el-ud-train.conllu', '32f4abc821624c4cd4d3b3b555c1558f06366e2c')},
 'he': {
     'dev': ('he-ud-dev.conllu', 'c5b76874fcf11c7733e1555957bb49e8298af140'),
     'test': ('he-ud-test.conllu', '4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),
     'train': ('he-ud-train.conllu', 'eae49a515b38d224b109138bf006a112e80a7caf')},
 'hi_pud': {'test': ('hi_pud-ud-test.conllu', 'd237fecc594186e7a52ad33313ac52e927905d73')},
 'hi': {
     'dev': ('hi-ud-dev.conllu', '48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),
     'test': ('hi-ud-test.conllu', '004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),
     'train': ('hi-ud-train.conllu', '9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},
 'hu': {
     'dev': ('hu-ud-dev.conllu', 'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),
     'test': ('hu-ud-test.conllu', 'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),
     'train': ('hu-ud-train.conllu', 'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},
 'id': {
     'dev': ('id-ud-dev.conllu', '7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),
     'test': ('id-ud-test.conllu', '357ed8c216725760bf5be561ed6e918ce602b5ac'),
     'train': ('id-ud-train.conllu', '328ea588b75de55ef48373c2bf9983bca277d724')},
 'ga': {
     'dev': ('ga-ud-dev.conllu', '180a1a9dcfcec6528a559032c67e9a15693a039d'),
     'test': ('ga-ud-test.conllu', 'b74a56372af3f68f089ea82ba858e5a82aae4e22'),
     'train': ('ga-ud-train.conllu', '40df0b12fbadae6e56c0a01da483d6c612d9450c')},
 'it_pud': {'test': ('it_pud-ud-test.conllu', 'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},
 'it_partut': {
     'dev': ('it_partut-ud-dev.conllu', '0bb5dc0c0815212c9832eaef3b802cf885e0543b'),
     'test': ('it_partut-ud-test.conllu', 'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),
     'train': ('it_partut-ud-train.conllu', '784b18bf8d3b59d967d147075a3cb5b03fb28637')},
 'it_postwita': {
     'dev': ('it_postwita-ud-dev.conllu', '07f6f658246aa070e2166e688f7569d61aafff54'),
     'test': ('it_postwita-ud-test.conllu', 'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),
     'train': ('it_postwita-ud-train.conllu', '69684c47fba99230f6ef1a204b95c37d28eaa5a6')},
 'it': {
     'dev': ('it-ud-dev.conllu', 'ea8fd59f36280fbd77b9a807959491636048a698'),
     'test': ('it-ud-test.conllu', '34839fdeeef883f8034c723a18772947106cec6b'),
     'train': ('it-ud-train.conllu', 'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},
 'ja_pud': {'test': ('ja_pud-ud-test.conllu', '4c914016a0968ca434348370d38c9579a60e8fd7')},
 'ja': {
     'dev': ('ja-ud-dev.conllu', '21f06fef7fbeccd05a298385bf40f8b4ffe95146'),
     'test': ('ja-ud-test.conllu', '240d3532698356a7c6f93c3215718ef2f66a672f'),
     'train': ('ja-ud-train.conllu', '35eaf307d94c2006241fe08f745d7b1b17f049cf')},
 'kk': {
     'dev': ('kk-ud-dev.conllu', '038033c822b407040a4ecb87c077506cd0d1a322'),
     'test': ('kk-ud-test.conllu', '4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),
     'train': ('kk-ud-train.conllu', '48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},
 'ko': {
     'dev': ('ko-ud-dev.conllu', '60e7da7cca44c923873a062e80262726659f5528'),
     'test': ('ko-ud-test.conllu', 'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),
     'train': ('ko-ud-train.conllu', 'ee21328f9ea39668e802f0cb6a794358f5c256bf')},
 'kmr': {
     'sample': ('kmr-ud-sample.conllu', 'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),
     'test': ('kmr-ud-test.conllu', '606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},
 'la_ittb': {
     'dev': ('la_ittb-ud-dev.conllu', 'd9f17992bd0258a734aea9b6c53759039717c86a'),
     'test': ('la_ittb-ud-test.conllu', 'f4d097d076083240c48594d4cb058840ff16be8e'),
     'train': ('la_ittb-ud-train.conllu', '627d5b30b20655efab194c75fc9219b0aa2cf4b6')},
 'la_proiel': {
     'dev': ('la_proiel-ud-dev.conllu', '9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),
     'test': ('la_proiel-ud-test.conllu', '697dbeae38507856a4fafa8506dfc8db5e8e4054'),
     'train': ('la_proiel-ud-train.conllu', '5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},
 'la': {
     'dev': ('la-ud-dev.conllu', '2748bb0479cb599e1a007d1d1634d5870b45549b'),
     'test': ('la-ud-test.conllu', '19c62c64ce41a650e9b55a345c61e7c0d994816e'),
     'train': ('la-ud-train.conllu', '183ce6f58b0305e5926161e29b9a6aacc424662c')},
 'lv': {
     'dev': ('lv-ud-dev.conllu', '6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),
     'test': ('lv-ud-test.conllu', '9f7806a24656db0e859efe041a88926b220b8e28'),
     'train': ('lv-ud-train.conllu', 'f1eeff608e8f27d92b683ae041591355198841eb')},
 'lt': {
     'dev': ('lt-ud-dev.conllu', '0b8dc19005571fa7b66d8302b797d51a241f128b'),
     'test': ('lt-ud-test.conllu', 'def54d6caf97610eb4ca8c0179d661c8eab98951'),
     'train': ('lt-ud-train.conllu', '13fe42a3d21f17a5cad5aaf38692619c7713e177')},
 'mr': {
     'dev': ('mr-ud-dev.conllu', 'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),
     'test': ('mr-ud-test.conllu', 'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),
     'train': ('mr-ud-train.conllu', '24a1370184054a7f5af647997dca783d6c571242')},
 'sme': {
     'sample': ('sme-ud-sample.conllu', '8c456f06b363c4d273fc454a49505f783f00fe43'),
     'test': ('sme-ud-test.conllu', '6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),
     'train': ('sme-ud-train.conllu', '203eab4183fd585efe3fea7e6df493a6746b0a9f')},
 'no_bokmaal': {
     'dev': ('no_bokmaal-ud-dev.conllu', '3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),
     'test': ('no_bokmaal-ud-test.conllu', '18336ef0e4877ae28eb7d6019afe05b5a53245d5'),
     'train': ('no_bokmaal-ud-train.conllu', 'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},
 'no_nynorsk': {
     'dev': ('no_nynorsk-ud-dev.conllu', '5b95a070d11a61a23fc340ecbbbbb70f86884498'),
     'test': ('no_nynorsk-ud-test.conllu', '3eaab8e4af82de2333521e9be0954ffaf6b1440b'),
     'train': ('no_nynorsk-ud-train.conllu', '79319993097c30ddf28d4c1137b8662f4f35d17e')},
 'no_nynorsklia': {
     'dev': ('no_nynorsklia-ud-dev.conllu', 'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),
     'test': ('no_nynorsklia-ud-test.conllu', 'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},
 'cu': {
     'dev': ('cu-ud-dev.conllu', '0b67035ed5ca52aeefae443611232ed202fb990a'),
     'test': ('cu-ud-test.conllu', '0fed872a5a2480b601c67ebbecf8dcd680b6863b'),
     'train': ('cu-ud-train.conllu', '1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},
 'fa': {
     'dev': ('fa-ud-dev.conllu', '098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),
     'test': ('fa-ud-test.conllu', '0024aa6bad5eceed2e36f77d88578304a5886a80'),
     'train': ('fa-ud-train.conllu', '1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},
 'pl': {
     'dev': ('pl-ud-dev.conllu', 'b7af7bee091feb0788eb9793a7102972006421dc'),
     'test': ('pl-ud-test.conllu', 'e141e793ba35f8a08510ec1ce494099b5c800ca8'),
     'train': ('pl-ud-train.conllu', 'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},
 'pt_br': {
     'dev': ('pt_br-ud-dev.conllu', '8eedc77096a87fe8ab251100d460780e161e5397'),
     'test': ('pt_br-ud-test.conllu', '37a64e3acef107b62ab62ce478fc36ed112fb58f'),
     'train': ('pt_br-ud-train.conllu', '023cafcb6959d52298ad619f7838f26db9798aa9')},
 'pt_pud': {'test': ('pt_pud-ud-test.conllu', '4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},
 'pt': {
     'dev': ('pt-ud-dev.conllu', '2171b4ac2b0726c9dfae6adf394b76be927accab'),
     'test': ('pt-ud-test.conllu', '9e819a4592db42905806141d6fca3b7b20396ce3'),
     'train': ('pt-ud-train.conllu', 'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},
 'ro_nonstandard': {
     'test': ('ro_nonstandard-ud-test.conllu', '300d53091412dc5700dc5cad0fd3e136f7c8cb11'),
     'train': ('ro_nonstandard-ud-train.conllu', 'ed97f51129b63857627f838f68f41c9ef8541686')},
 'ro': {
     'dev': ('ro-ud-dev.conllu', 'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),
     'test': ('ro-ud-test.conllu', '0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),
     'train': ('ro-ud-train.conllu', '74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},
 'ru_pud': {'test': ('ru_pud-ud-test.conllu', 'bca81ce7aaf3cb8add98b19faecc1d8303901631')},
 'ru_syntagrus': {
     'dev': ('ru_syntagrus-ud-dev.conllu', '304c6ec7fb5060583af5f890384e3a480f8c3ad5'),
     'test': ('ru_syntagrus-ud-test.conllu', 'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),
     'train': ('ru_syntagrus-ud-train.conllu', '8fa56fa80845e4ad946189d1e7af228b5595e312')},
 'ru': {
     'dev': ('ru-ud-dev.conllu', 'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),
     'test': ('ru-ud-test.conllu', 'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),
     'train': ('ru-ud-train.conllu', 'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},
 'sa': {'test': ('sa-ud-test.conllu', 'fad3a03a6834884a092b1d326625c6f663e36636')},
 'sr': {
     'dev': ('sr-ud-dev.conllu', 'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),
     'test': ('sr-ud-test.conllu', '0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),
     'train': ('sr-ud-train.conllu', '97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},
 'sk': {
     'dev': ('sk-ud-dev.conllu', 'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),
     'test': ('sk-ud-test.conllu', '89af4581c5f9058809f48788eb635a92cda0603c'),
     'train': ('sk-ud-train.conllu', '89e108093bbf5619578955fdadfe200cefd8cf01')},
 'sl_sst': {
     'dev': ('sl_sst-ud-dev.conllu', 'c65ae82123af95ec11f47262546b5ab2fc5735e5'),
     'test': ('sl_sst-ud-test.conllu', '144a0124c1181b49d0c542a4a6d4465e45545f3b'),
     'train': ('sl_sst-ud-train.conllu', '4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},
 'sl': {
     'dev': ('sl-ud-dev.conllu', '0078572c19574d32defeae9924176da2dd701ede'),
     'test': ('sl-ud-test.conllu', '616ace00e25df99be8dd49b7bf7c48f1093df96a'),
     'train': ('sl-ud-train.conllu', '1462ac69163b30cf1399527e95f686ebf91be2d3')},
 'es_ancora': {
     'dev': ('es_ancora-ud-dev.conllu', '94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),
     'test': ('es_ancora-ud-test.conllu', '8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),
     'train': ('es_ancora-ud-train.conllu', '95d5bf7ad33304f3440ffb014ac094c4967c303f')},
 'es_pud': {'test': ('es_pud-ud-test.conllu', 'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},
 'es': {
     'dev': ('es-ud-dev.conllu', '4cdb828c492c6b7707af0ab6c7fbf734f770630a'),
     'test': ('es-ud-test.conllu', 'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),
     'train': ('es-ud-train.conllu', '5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},
 'sv_lines': {
     'dev': ('sv_lines-ud-dev.conllu', '15f1a04d960518fe7bfee23ce227fc7b78d4b755'),
     'test': ('sv_lines-ud-test.conllu', '843df4ea3ab4f551b1eaa661652a8d6489a81d41'),
     'train': ('sv_lines-ud-train.conllu', '16e3533bf174b36d728847a36a3600f16c63baa6')},
 'sv_pud': {'test': ('sv_pud-ud-test.conllu', '18dadac0c15468256b340835ebc0529facbe9b73')},
 'sv': {
     'dev': ('sv-ud-dev.conllu', '6d14e1aae5c9ae37c35481c44c04bf74a4233455'),
     'test': ('sv-ud-test.conllu', '7ead0f7b49508db0022c042195ac5925b611c5b7'),
     'train': ('sv-ud-train.conllu', '68affb85efde6ed017eab1e998e9666108559e04')},
 'swl': {
     'dev': ('swl-ud-dev.conllu', '828e0a08f12cabfa75f9dd2b53dba58606522a7c'),
     'test': ('swl-ud-test.conllu', '674f76631cf16172d67b795ff92dfbb297eb4930'),
     'train': ('swl-ud-train.conllu', '46b721f9cae2d5ba43f818dd487600b0ce76362a')},
 'ta': {
     'dev': ('ta-ud-dev.conllu', '4d01f555012ddc1976933d4d928e26470f71bfa1'),
     'test': ('ta-ud-test.conllu', 'e8db8816a98d8b7e81188786db7c405979a7e3c3'),
     'train': ('ta-ud-train.conllu', '6753d8c7b1b016de39c087aab45056de6021c3ae')},
 'te': {
     'dev': ('te-ud-dev.conllu', '29f46355d767e54e8565f76a063c43e95ead0fca'),
     'test': ('te-ud-test.conllu', '50abe345d4ab5bae021cacd096266c57b00572b8'),
     'train': ('te-ud-train.conllu', '1794469abe09e7364cda0d9764cf515dcb4a61b6')},
 'tr_pud': {'test': ('tr_pud-ud-test.conllu', 'aae839e2476a2f149c98e0274d245d07a50dafaa')},
 'tr': {
     'dev': ('tr-ud-dev.conllu', '421de4d8d0fbdda46750523bde72880414c134a3'),
     'test': ('tr-ud-test.conllu', 'b175f136f6f0271c494a58a1846971c4a07cda27'),
     'train': ('tr-ud-train.conllu', '5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},
 'uk': {
     'dev': ('uk-ud-dev.conllu', '0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),
     'test': ('uk-ud-test.conllu', '46c88fd623894fabdafb01a826016c215e4f65cc'),
     'train': ('uk-ud-train.conllu', 'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},
 'hsb': {
     'sample': ('hsb-ud-sample.conllu', '148eddbb19b06115ea54e17a3fca58e99a85cbd9'),
     'test': ('hsb-ud-test.conllu', '3d319288b4c06395b2627980737131995949f770')},
 'ur': {
     'dev': ('ur-ud-dev.conllu', 'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),
     'test': ('ur-ud-test.conllu', 'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),
     'train': ('ur-ud-train.conllu', '488d65b394d0de264be1221614c09e541f92f9de')},
 'ug': {
     'dev': ('ug-ud-dev.conllu', 'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),
     'test': ('ug-ud-test.conllu', '4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},
 'vi': {
     'dev': ('vi-ud-dev.conllu', '1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),
     'test': ('vi-ud-test.conllu', '1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),
     'train': ('vi-ud-train.conllu', 'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}
# coding: utf-8
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines
"""Constants."""
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<bos>'
EOS_TOKEN = '<eos>'
PAD_TOKEN = '<pad>'
# UNK_IDX should not be changed as long as the serialized token embeddings
# redistributed on S3 contain an unknown token. See the git blame / commit
# history of this constant for more context.
UNK_IDX = 0
LARGE_POSITIVE_FLOAT = 1e18
LARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT
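
# Illustrative sketch (not part of the original module) of how the special
# tokens and the large-float sentinels above are typically consumed: PAD_TOKEN
# right-pads ragged batches, and LARGE_NEGATIVE_FLOAT is added to scores at
# padded positions so they vanish under a subsequent softmax. The helper name
# `_pad_and_mask` is hypothetical.
def _pad_and_mask(batch, length):
    """Right-pad each token list in `batch` (assumed no longer than `length`)
    with PAD_TOKEN; return the padded batch plus an additive mask holding
    LARGE_NEGATIVE_FLOAT at padded positions and 0.0 elsewhere."""
    padded, masks = [], []
    for seq in batch:
        pad_len = length - len(seq)
        padded.append(list(seq) + [PAD_TOKEN] * pad_len)
        masks.append([0.0] * len(seq) + [LARGE_NEGATIVE_FLOAT] * pad_len)
    return padded, masks
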
GLOVE_NPZ_SHA1 = \
{'glove.42B.300d': ('glove.42B.300d.npz',
'7deee8f4860744db53ed9e50892effe9883e6d89'),
'glove.6B.100d': ('glove.6B.100d.npz',
'01f80f202fcabcc3e0804898349087bfc191dd1c'),
'glove.6B.200d': ('glove.6B.200d.npz',
'5e6e2bdab346c257f88d80d215d518e680d86e32'),
'glove.6B.300d': ('glove.6B.300d.npz',
'1db264aa936be62f055dfb72854204450bdf4399'),
'glove.6B.50d': ('glove.6B.50d.npz',
'aa16be8d184399d2199f83fd62586f2c30497bfa'),
'glove.840B.300d': ('glove.840B.300d.npz',
'b4ba390c1154736e07c0e67d9180935f5930e83c'),
'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',
'0f7b82c223451d0002f79ba23596983cdbe0e2b1'),
'glove.twitter.27B.200d': ('glove.twitter.27B.200d.npz',
'41cc2d26f58a54622ce96bf6c8434360ab524f20'),
'glove.twitter.27B.25d': ('glove.twitter.27B.25d.npz',
'9f563d2f296995598cc46812b2fda05ad4c3c879'),
'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',
'ce9959c056f2a0a780c468feeb4f823af51630e9')}
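
# Illustrative sketch (not part of the original module): the SHA-1 registries
# in this file map a pretrained-file name to an (archive name, expected
# digest) pair, so a downloaded archive can be verified with the standard
# library. `path` is a hypothetical local file path and the helper name
# `_sha1_matches` is hypothetical.
def _sha1_matches(path, expected_sha1):
    """Return True iff the SHA-1 digest of the file at `path` equals
    `expected_sha1` (e.g. GLOVE_NPZ_SHA1['glove.6B.50d'][1])."""
    import hashlib
    sha1 = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):
            sha1.update(chunk)
    return sha1.hexdigest() == expected_sha1
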
FAST_TEXT_NPZ_SHA1 = \
{'crawl-300d-2M': ('crawl-300d-2M.npz',
'9dd611a1fe280c63050cd546d3595400fc0eede4'),
'wiki.aa': ('wiki.aa.npz',
'48f163b80eb37f1806142169d3d4c05cf75b7339'),
'wiki.ab': ('wiki.ab.npz',
'860ceff119dd27e5b701b605879037c1310cbc3e'),
'wiki.ace': ('wiki.ace.npz',
'62938287464040491719f56a6f521f8f808beee8'),
'wiki.ady': ('wiki.ady.npz',
'646843afa260d018ed711df3f1ca9c3e000447b6'),
'wiki.af': ('wiki.af.npz',
'7b14cd27690b67fea318d0bac2283c16430680e2'),
'wiki.ak': ('wiki.ak.npz',
'20f309adad1c45958c97b6055d5838e05bbaea72'),
'wiki.als': ('wiki.als.npz',
'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'),
'wiki.am': ('wiki.am.npz',
'ed3dd10cea64737f7a1623612ee099df9dc19f66'),
'wiki.ang': ('wiki.ang.npz',
'8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'),
'wiki.an': ('wiki.an.npz',
'168046283c719ab96a29b1abae2e25a6575c7be8'),
'wiki.arc': ('wiki.arc.npz',
'049021b7decea4bc009b12936e56b4dbf5b760e7'),
'wiki.ar': ('wiki.ar.npz',
'7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'),
'wiki.arz': ('wiki.arz.npz',
'7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'),
'wiki.as': ('wiki.as.npz',
'01d38c29cd4bd99c1a8534abc058822da14a5b9c'),
'wiki.ast': ('wiki.ast.npz',
'9c9846ba5084505a0adea89c95c66e04efbf5ce9'),
'wiki.av': ('wiki.av.npz',
'7ef6a920c364638504e673cfde5f7675503fa81e'),
'wiki.ay': ('wiki.ay.npz',
'c1202e110930e3902397f5cb64a8359e013b469f'),
'wiki.azb': ('wiki.azb.npz',
'10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'),
'wiki.az': ('wiki.az.npz',
'74257c3bcd533a606afae509ea835dc036d61546'),
'wiki.ba': ('wiki.ba.npz',
'4a2857ed694d66864df562b376c2fa12fcb03646'),
'wiki.bar': ('wiki.bar.npz',
'e65c6b7e9ff83798d1eea05d166148837d53e615'),
'wiki.bat_smg': ('wiki.bat_smg.npz',
'6420584ae28ba6c9dd145fea8f096243d457c2d8'),
'wiki.bcl': ('wiki.bcl.npz',
'33606c970ab336b678393e2bdb8af2116d11cf7b'),
'wiki.be': ('wiki.be.npz',
'84487d341e333344cf71bc12c7a205d923762498'),
'wiki.bg': ('wiki.bg.npz',
'56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'),
'wiki.bh': ('wiki.bh.npz',
'07473989853a344a41aaa18f41030dc56d0d01c7'),
'wiki.bi': ('wiki.bi.npz',
'08adfa3c9ef3016d30ef69ea539d217ff67eda09'),
'wiki.bjn': ('wiki.bjn.npz',
'998a551283222931d3a26922308449950bfa3ec7'),
'wiki.bm': ('wiki.bm.npz',
'454ff9fbd4790e4a076d9a2087a51da28aa1332f'),
'wiki.bn': ('wiki.bn.npz',
'1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'),
'wiki.bo': ('wiki.bo.npz',
'b9fe87318428de0a7790de175b5fec80c5af482d'),
'wiki.bpy': ('wiki.bpy.npz',
'5c7853173d27e2c018c24eca69de8d5f34511b0d'),
'wiki.br': ('wiki.br.npz',
'7aa66a2034fbfaa1d39e637385d48610238797c9'),
'wiki.bs': ('wiki.bs.npz',
'a019a4677677c2e9e4d899326b2b6c15ad6c011a'),
'wiki.bug': ('wiki.bug.npz',
'09ae3477941d7a99d1df494368d7efb0b2c18913'),
'wiki.bxr': ('wiki.bxr.npz',
'b832c691b8ddd95896c052d3d15e1f98d72068d5'),
'wiki.ca': ('wiki.ca.npz',
'391e0d4daad08649251274fa1cc2a5f49c7728b1'),
'wiki.cbk_zam': ('wiki.cbk_zam.npz',
'02e57a763bc9f9eadaba57953383dd12a0a78a37'),
'wiki.cdo': ('wiki.cdo.npz',
'd6e8f422327e8b2273f1f2662d793707ece6695d'),
'wiki.ceb': ('wiki.ceb.npz',
'23bc0bb9aeaa57dff35092766941a866de142aae'),
'wiki.ce': ('wiki.ce.npz',
'182b2a889256119a6d379d501c55c7621e5855db'),
'wiki.ch': ('wiki.ch.npz',
'82dd77512fcb463481f43c9cef3507e2baa90d7b'),
'wiki.cho': ('wiki.cho.npz',
'b0b620fc2442d1a6e2440e71a424861c80175f0c'),
'wiki.chr': ('wiki.chr.npz',
'3d62c6b95c5af46abd6234426ae760cca65d5bd0'),
'wiki.chy': ('wiki.chy.npz',
'34a28a22da79aebc100e3714b825c95c8d5f54a3'),
'wiki.ckb': ('wiki.ckb.npz',
'ad19461e4be583d08b7693ff5b1e9d590ed41add'),
'wiki.co': ('wiki.co.npz',
'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'),
'wiki.crh': ('wiki.crh.npz',
'540270ba6edd9d7b2f7efca52b3b407524ac67d1'),
'wiki.cr': ('wiki.cr.npz',
'f06b77465a38ec960d7d5a7554b848c37e945c76'),
'wiki.csb': ('wiki.csb.npz',
'b8b28559cf2541341af98e2aa755856765bdeabf'),
'wiki.cs': ('wiki.cs.npz',
'19881e931fe06abf341450f00c342d364313e232'),
'wiki.cu': ('wiki.cu.npz',
'731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'),
'wiki.cv': ('wiki.cv.npz',
'e60034fcffb7dfef7b236ddba1194c3aa20b7967'),
'wiki.cy': ('wiki.cy.npz',
'5a0fb967b5556f007c0d5065f951a3d3b1c1005a'),
'wiki.da': ('wiki.da.npz',
'd06258014ba2c7450bc2d55edfdf1731433e42e5'),
'wiki.de': ('wiki.de.npz',
'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'),
'wiki.diq': ('wiki.diq.npz',
'4f6c77a86b39834a7130419967759afd8cc26b84'),
'wiki.dsb': ('wiki.dsb.npz',
'e74f1d346a8db96987bff0c33ee5f886907c380a'),
'wiki.dv': ('wiki.dv.npz',
'5d6fe6f0eec2e7704121d5aba03b4edbb28af873'),
'wiki.dz': ('wiki.dz.npz',
'77c639d36d0355b2de5adead7996eae342b852a6'),
'wiki.ee': ('wiki.ee.npz',
'4b5a76127d57515d3e8a76787cdefde5856b754a'),
'wiki.el': ('wiki.el.npz',
'a00bcb97e7898931196a1c69f7a492e5b6202661'),
'wiki.eml': ('wiki.eml.npz',
'b475d626b3d97e7a68c02827fdc7900599e838c6'),
'wiki.en': ('wiki.en.npz',
'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'),
'wiki.eo': ('wiki.eo.npz',
'18049b0010520d13e676f5a82e8bb90153d99003'),
'wiki.es': ('wiki.es.npz',
'a6d192ba7d82d762f8367e75ca951aad4d11e410'),
'wiki.et': ('wiki.et.npz',
'4beb7025cf88f1aa62d025b187f0cb09aee61858'),
'wiki.eu': ('wiki.eu.npz',
'5e1a8197e35f20a2476798bbb935b4c131289c4f'),
'wiki.ext': ('wiki.ext.npz',
'049b2d1b0a8b102b45907cf487cac30aa294e0a0'),
'wiki.fa': ('wiki.fa.npz',
'81ed274997c87ef87d73d25e166ca06272ce426f'),
'wiki.ff': ('wiki.ff.npz',
'4867dc74cd53ca0b0f769af4fa1ea420406b59bf'),
'wiki.fi': ('wiki.fi.npz',
'6d1291b854045179f8171ac7d62ede7d8ac159a2'),
'wiki.fiu_vro': ('wiki.fiu_vro.npz',
'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'),
'wiki.fj': ('wiki.fj.npz',
'cf5c31b0a69276f5dd18ab738ed92444abaeb755'),
'wiki.fo': ('wiki.fo.npz',
'ffc19807d528af000861a94cfb8097bd686e14fc'),
'wiki.fr': ('wiki.fr.npz',
'8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'),
'wiki.frp': ('wiki.frp.npz',
'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'),
'wiki.frr': ('wiki.frr.npz',
'fa5e5c39ea2a45793c679eacea290a35e37405ea'),
'wiki.fur': ('wiki.fur.npz',
'a61a8940d059f25000e3fe23933e5ed0d37e65d3'),
'wiki.fy': ('wiki.fy.npz',
'46f9f41bdf6f4fb8e27a753290413d745465963b'),
'wiki.gag': ('wiki.gag.npz',
'49fb01230e6803544122d47ab7d3fe694d1444f2'),
'wiki.gan': ('wiki.gan.npz',
'716b7b26acc15975f30caf3c6effa111516fcca5'),
'wiki.ga': ('wiki.ga.npz',
'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'),
'wiki.gd': ('wiki.gd.npz',
'597017b5a32d933f194595d3656f858e37e70a62'),
'wiki.glk': ('wiki.glk.npz',
'91a5834658bc2d48714e8807ef24efb79567b4b5'),
'wiki.gl': ('wiki.gl.npz',
'2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'),
'wiki.gn': ('wiki.gn.npz',
'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'),
'wiki.gom': ('wiki.gom.npz',
'8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),
'wiki.got': ('wiki.got.npz',
'd05daf105611150695e61775fdff2c500b36be3f'),
'wiki.gu': ('wiki.gu.npz',
'0ce175c5fc39bab4032892f70c9d2bb850af0f4a'),
'wiki.gv': ('wiki.gv.npz',
'2c573f873d607831ff01b64603c17b8db79bd7e1'),
'wiki.hak': ('wiki.hak.npz',
'e6048727799cdf149f5c50037e0fc59300d33a94'),
'wiki.ha': ('wiki.ha.npz',
'f18ea7286bbd390c5470896b2c99cb1adc740064'),
'wiki.haw': ('wiki.haw.npz',
'18bcd85d2e06b1b889f0835fc5b62697fdf32d72'),
'wiki.he': ('wiki.he.npz',
'76915ff167b6ecb7b7e22ff0ca46914a55d344af'),
'wiki.hif': ('wiki.hif.npz',
'12153aaf98d76d5502ab77a27cd0b9a539f61513'),
'wiki.hi': ('wiki.hi.npz',
'249666a598991f6ec147954c6af9e531fd1cd94e'),
'wiki.ho': ('wiki.ho.npz',
'3f804fd69780c0789708b56ea9d48715f8e38f26'),
'wiki.hr': ('wiki.hr.npz',
'9a3de28e69f97048bfb480b4f83eaab6149f66ad'),
'wiki.hsb': ('wiki.hsb.npz',
'7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'),
'wiki.ht': ('wiki.ht.npz',
'a607093d511afeb584d02dc676bc5a27eff66287'),
'wiki.hu': ('wiki.hu.npz',
'9b2c4750daf1bcf39768572e874b5afda0e2f0bc'),
'wiki.hy': ('wiki.hy.npz',
'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'),
'wiki.hz': ('wiki.hz.npz',
'5dfb8afbdae6b4148c3e55ab459c56a74b46b463'),
'wiki.ia': ('wiki.ia.npz',
'4cfaaf053b9513bbf5b2423258c0f01d20256de6'),
'wiki.id': ('wiki.id.npz',
'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'),
'wiki.ie': ('wiki.ie.npz',
'1bae7256c2e763ce6d692d1c0a603d99a8b22826'),
'wiki.ig': ('wiki.ig.npz',
'23128e54a5e143891d392d621723bad9cfc8cf7b'),
'wiki.ii': ('wiki.ii.npz',
'54bc16d05da512481865a89ecf30260b0acc04dc'),
'wiki.ik': ('wiki.ik.npz',
'f8015227e893d2375699b7d132b306ba381f02ac'),
'wiki.ilo': ('wiki.ilo.npz',
'185a11f81bd5d24a34558dda81ee4735f5ba150b'),
'wiki.io': ('wiki.io.npz',
'ddf8180a90aa6ee5be93a2582cc99c535f21363e'),
'wiki.is': ('wiki.is.npz',
'968f8dd2a093b279a6f7aaa734008454bf51d724'),
'wiki.it': ('wiki.it.npz',
'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'),
'wiki.iu': ('wiki.iu.npz',
'fa8896730bd6c24c3473daa22116d1016294e7f7'),
'wiki.jam': ('wiki.jam.npz',
'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'),
'wiki.ja': ('wiki.ja.npz',
'8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'),
'wiki.jbo': ('wiki.jbo.npz',
'145fc999ab004b348cf9bf445f0a93a7a145308b'),
'wiki.jv': ('wiki.jv.npz',
'66978770bf06e42414395cf5fd8c596044d72bec'),
'wiki.kaa': ('wiki.kaa.npz',
'624a640ecb9901b2aba2e9f44ab615146ecb2862'),
'wiki.kab': ('wiki.kab.npz',
'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'),
'wiki.ka': ('wiki.ka.npz',
'1ca8376e1e0cbd58001c1b51a2d488a2874a6743'),
'wiki.kbd': ('wiki.kbd.npz',
'f2d2a05b06723ac549784ad5470d84f5742a1352'),
'wiki.kg': ('wiki.kg.npz',
'fa7f6d5f660a173a3e75342d449980eedcdc789e'),
'wiki.ki': ('wiki.ki.npz',
'21a8c7c616c0050c51c288861f3423f313e4f634'),
'wiki.kj': ('wiki.kj.npz',
'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'),
'wiki.kk': ('wiki.kk.npz',
'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'),
'wiki.kl': ('wiki.kl.npz',
'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'),
'wiki.km': ('wiki.km.npz',
'e053799fd01463808432dc035bef3e36620e2f36'),
'wiki.kn': ('wiki.kn.npz',
'2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'),
'wiki.koi': ('wiki.koi.npz',
'a9b02e9bd41833bcd54769f94626019c03f29997'),
'wiki.ko': ('wiki.ko.npz',
'764d9896e74b5a26c6884d48bce3bed8ed3a7822'),
'wiki.krc': ('wiki.krc.npz',
'bfe39598c718f1cc95909db7544b3214b308a97c'),
'wiki.kr': ('wiki.kr.npz',
'1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'),
'wiki.ksh': ('wiki.ksh.npz',
'66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'),
'wiki.ks': ('wiki.ks.npz',
'85f1adaa05b854df4dede745a1aaab3836e60770'),
'wiki.ku': ('wiki.ku.npz',
'faf90584e5a45e6d0f9eeb88399b82abe037d584'),
'wiki.kv': ('wiki.kv.npz',
'9f2b41822013a412da9c99fac06eed8be03ca192'),
'wiki.kw': ('wiki.kw.npz',
'3eed8a8fc97a2fc79241b8474a458c98d00fc897'),
'wiki.ky': ('wiki.ky.npz',
'0116ff90f10a6c0728e1ea86d8a44896ea83270a'),
'wiki.lad': ('wiki.lad.npz',
'5af2015b3d1c5e8563f0e92721580988ebe2ce50'),
'wiki.la': ('wiki.la.npz',
'7143303a3ea13c7668eb90ea6e3d2ca69857a3be'),
'wiki.lbe': ('wiki.lbe.npz',
'f206a3c35a184ba5d2b32ee68640eadf66c847da'),
'wiki.lb': ('wiki.lb.npz',
'143dc6337f3690379282034c460c613d7f144923'),
'wiki.lez': ('wiki.lez.npz',
'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'),
'wiki.lg': ('wiki.lg.npz',
'866640ce62cedbc1d453b7ea3c289c291ad76e13'),
'wiki.lij': ('wiki.lij.npz',
'0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'),
'wiki.li': ('wiki.li.npz',
'4666b3c238256d7b7623a136db19b8b9f4754734'),
'wiki.lmo': ('wiki.lmo.npz',
'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'),
'wiki.ln': ('wiki.ln.npz',
'fba158719944aabe58e0002a90be0ed77e11702d'),
'wiki.lo': ('wiki.lo.npz',
'1e113e340a8a93d385e14502c9c4e3bcdf6c3101'),
'wiki.lrc': ('wiki.lrc.npz',
'42cb755f398fba6f0da7949c91e92b55654bd482'),
'wiki.ltg': ('wiki.ltg.npz',
'182f75859e228d1162215f28fe7f2dca127624a4'),
'wiki.lt': ('wiki.lt.npz',
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'),
'wiki.lv': ('wiki.lv.npz',
'2be8f926da85694fa998bf79d80b61ebb8d67576'),
'wiki.mai': ('wiki.mai.npz',
'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),
'wiki.map_bms': ('wiki.map_bms.npz',
'6f0394d6b3d08a946e3df4b9355efe94148f018a'),
'wiki.mdf': ('wiki.mdf.npz',
'774ee35334641db57f9ac9069961c5372a5d92e8'),
'wiki.mg': ('wiki.mg.npz',
'496c48ef668f08ce95ebb11ce1ce5026b52d935c'),
'wiki.mh': ('wiki.mh.npz',
'352edd84f99c5aa277a7306f6cacea1fab065ed3'),
'wiki.mhr': ('wiki.mhr.npz',
'dd78b27a674ac10411cdf74ac32f9391506b17e0'),
'wiki.min': ('wiki.min.npz',
'628b406441ab03bc8aa68195ada50bfdc8226f34'),
'wiki.mi': ('wiki.mi.npz',
'754127b473861cd4f9ae034c9f527a34827b1f00'),
'wiki.mk': ('wiki.mk.npz',
'b09fed4f56c296f13c4020ef1fec498382a38b73'),
'wiki.ml': ('wiki.ml.npz',
'02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'),
'wiki.mn': ('wiki.mn.npz',
'08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'),
'wiki.mo': ('wiki.mo.npz',
'638c2e8bd2352fd52921b9ae62f578b8357bab49'),
'wiki.mrj': ('wiki.mrj.npz',
'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'),
'wiki.mr': ('wiki.mr.npz',
'074dd68c947c2f137a3e84b55012925f00213139'),
'wiki.ms': ('wiki.ms.npz',
'3dbe9e9d70251de8a374776ff1250a9c3103ee59'),
'wiki.mt': ('wiki.mt.npz',
'f5103998a68d1b178387417436a83123d44aba01'),
'wiki.multi.ar': ('wiki.multi.ar.npz',
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'),
'wiki.multi.bg': ('wiki.multi.bg.npz',
'c04018f3a600cee170f12a36cdd35b4727a2aade'),
'wiki.multi.ca': ('wiki.multi.ca.npz',
'eef52a0cf20c133ca9065de25f0702861a8cfa29'),
'wiki.multi.cs': ('wiki.multi.cs.npz',
'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),
'wiki.multi.da': ('wiki.multi.da.npz',
'24374f2ee169b33327feeee46da31b0de1622fe4'),
'wiki.multi.de': ('wiki.multi.de.npz',
'2e6c119b345bebd34b56eaaf855d6703889b11f7'),
'wiki.multi.el': ('wiki.multi.el.npz',
'9d122beedb80a2e5334946641e5bafd32c01e76b'),
'wiki.multi.en': ('wiki.multi.en.npz',
'8c3c480b4cb2690304173713a646280613b244a8'),
'wiki.multi.es': ('wiki.multi.es.npz',
'483a22656e4fb2a01e9f4ef8156b261e780850ab'),
'wiki.multi.et': ('wiki.multi.et.npz',
'22498c7b91645a3874fa738b5cfb16bf98b6f97c'),
'wiki.multi.fi': ('wiki.multi.fi.npz',
'765a6f0b63777bff4ae6ca2b461c5889c03d6a70'),
'wiki.multi.fr': ('wiki.multi.fr.npz',
'decd9aacf600114b8a36072535c0309874a37c83'),
'wiki.multi.he': ('wiki.multi.he.npz',
'7eee940c1b85936f59122f4b1a166223dd946674'),
'wiki.multi.hr': ('wiki.multi.hr.npz',
'1673963416af088f8bf15576afb33d58115db35c'),
'wiki.multi.hu': ('wiki.multi.hu.npz',
'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'),
'wiki.multi.id': ('wiki.multi.id.npz',
'6c3e721febb511ede7db7bf978d65769e4270f5c'),
'wiki.multi.it': ('wiki.multi.it.npz',
'fc5bfc11e0165e8d95c1708573dad5e456826c73'),
'wiki.multi.mk': ('wiki.multi.mk.npz',
'6cd50198355674f156fc863108d9bebf11cfabd9'),
'wiki.multi.nl': ('wiki.multi.nl.npz',
'4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'),
'wiki.multi.no': ('wiki.multi.no.npz',
'63756168c1101e73fba8d1a5015f32b8892819e6'),
'wiki.multi.pl': ('wiki.multi.pl.npz',
'958b8e8bead965ba1bb1433e1c960fc3e12a10fb'),
'wiki.multi.pt': ('wiki.multi.pt.npz',
'22f07df1609d79b95344ee575ea43141424a1528'),
'wiki.multi.ro': ('wiki.multi.ro.npz',
'73180b3e382519004bf38ea7b86237aacbbe813a'),
'wiki.multi.ru': ('wiki.multi.ru.npz',
'3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),
'wiki.multi.sk': ('wiki.multi.sk.npz',
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'),
'wiki.multi.sl': ('wiki.multi.sl.npz',
'3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),
'wiki.multi.sv': ('wiki.multi.sv.npz',
'4f1494885b9a831e87cfa3c15f2204c4a73c0779'),
'wiki.multi.tr': ('wiki.multi.tr.npz',
'54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),
'wiki.multi.uk': ('wiki.multi.uk.npz',
'500fd26b1d7a25b42458012e99f9f76642e0c787'),
'wiki.multi.vi': ('wiki.multi.vi.npz',
'3955809cceb300965c15f9372221417719bb0db8'),
'wiki.mus': ('wiki.mus.npz',
'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'),
'wiki.mwl': ('wiki.mwl.npz',
'8a5e2c272166f8a72c5694ca6c3104d5f49179ec'),
'wiki.my': ('wiki.my.npz',
'5e035aca16700d7d6695af8a6d3a88ac847aaeb7'),
'wiki.myv': ('wiki.myv.npz',
'd4cfaab70c640033e02c0fc0c5a3615ae836c569'),
'wiki.mzn': ('wiki.mzn.npz',
'ad09ac584ae455b5862b95125ef409360ae18445'),
'wiki.nah': ('wiki.nah.npz',
'2dc454ef37d059f2053af46cfa1f4f0ca939cba0'),
'wiki.na': ('wiki.na.npz',
'401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'),
'wiki.nap': ('wiki.nap.npz',
'996da46aeeab5644ba766d00c5e343b1553361d7'),
'wiki.nds_nl': ('wiki.nds_nl.npz',
'5a9307e16b13a5a82ec19a52b33254537e7198e7'),
'wiki.nds': ('wiki.nds.npz',
'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'),
'wiki.ne': ('wiki.ne.npz',
'a601db2647a74ffd2b4b43dcb8584735f555459c'),
'wiki.new': ('wiki.new.npz',
'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),
'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',
'0a03bbd508e5381e140476140fb121afeb0050ed'),
'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',
'69edae21375407781c727dcb9e534e79d712d137'),
'wiki.ng': ('wiki.ng.npz',
'befd774d15f69d43547e13e5ea3a97c4cb1ab405'),
'wiki.nl': ('wiki.nl.npz',
'5a7cb6f1dd0a7621202abba9461ac2c5bf905219'),
'wiki.nn': ('wiki.nn.npz',
'8e5059ddeb24050fadaa5cc4622b13feb3e4a226'),
'wiki.no': ('wiki.no.npz',
'5ce6e0f793e66f081652f64013968099de03d9f9'),
'wiki.nov': ('wiki.nov.npz',
'95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'),
'wiki.vec': ('wiki.vec.npz',
'08ebb912efeb9df1c7d05e1af90484d210dff47e'),
'wiki.nrm': ('wiki.nrm.npz',
'e58614b4508ff9810f0b58fd818f973775bc918d'),
'wiki.nso': ('wiki.nso.npz',
'56a2ebe260241402d117cd89c5c872b9c96ff05b'),
'wiki.nv': ('wiki.nv.npz',
'c713051fe03ec1f60314bb42161b2a47fb5e169a'),
'wiki.ny': ('wiki.ny.npz',
'ba5a1725955cbc13e7fd93ab499f8085840c992c'),
'wiki.oc': ('wiki.oc.npz',
'259e7d994c38a4cfc140fb07016b82d6781e5027'),
'wiki.olo': ('wiki.olo.npz',
'0fea70f887def4779ee70a79366b88f1ada65004'),
'wiki.om': ('wiki.om.npz',
'47e2d756b5f8913085d901375c1b4e0b118a4221'),
'wiki.or': ('wiki.or.npz',
'7e274ab060219b019aa02bb97941cc6e162fd01f'),
'wiki.os': ('wiki.os.npz',
'19e8199cc2aaffdb07b6c558dbc5465ac6e03155'),
'wiki.pag': ('wiki.pag.npz',
'eddf4931547649026c02f893297ef673ec6158bb'),
'wiki.pam': ('wiki.pam.npz',
'40109aa174bd9f0fa657839bb548e2b0646c58d3'),
'wiki.pa': ('wiki.pa.npz',
'8a5870717e9e641b1f757f13259171698118de2e'),
'wiki.pap': ('wiki.pap.npz',
'999c8e5b005ca20d9998fbbe4fa79177f69e24c0'),
'wiki.pcd': ('wiki.pcd.npz',
'e975066b323a65cdc5e4c27138ef674d2cf7250b'),
'wiki.pdc': ('wiki.pdc.npz',
'5c770b9d56f276b0aa535845f175c05ee1cea615'),
'wiki.pfl': ('wiki.pfl.npz',
'0063d0b633ee529a75482b36ed4f4da7d64994ec'),
'wiki.pih': ('wiki.pih.npz',
'ce1d76c94d248545eea0d7436c54849dbb380bfc'),
'wiki.pi': ('wiki.pi.npz',
'c7d56c334bf529f8b3655693d207a80feaec4aed'),
'wiki.pl': ('wiki.pl.npz',
'0d612fdf871a1a4084c867f394940475be899443'),
'wiki.pms': ('wiki.pms.npz',
'ca149a2fb138011315bb6d5d61c7a5647e515e51'),
'wiki.pnb': ('wiki.pnb.npz',
'9ec82d02ad8894056c67991cf8ce927bcca74ee2'),
'wiki.pnt': ('wiki.pnt.npz',
'3f90123407bb8fc838a0a0d3700a14e15f5b26aa'),
'wiki.ps': ('wiki.ps.npz',
'7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'),
'wiki.pt': ('wiki.pt.npz',
'f172fd801edd1ad9d319ba44146d40b5d682a473'),
'wiki.qu': ('wiki.qu.npz',
'68bec60ccfe1826c3b3a8968574488dbc74cdf7b'),
'wiki.rm': ('wiki.rm.npz',
'00fb191fc736ba60cb23e76169dfccde9a9daad0'),
'wiki.rmy': ('wiki.rmy.npz',
'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'),
'wiki.rn': ('wiki.rn.npz',
'57b8e0d6999269be227af6ef2797a9cf8386ff1b'),
'wiki.roa_rup': ('wiki.roa_rup.npz',
'e06d6b5672a59bb9e83143bc8b28300d23c09546'),
'wiki.roa_tara': ('wiki.roa_tara.npz',
'c083105f40236dc3711f06c1b40e8ee7a714b99d'),
'wiki.ro': ('wiki.ro.npz',
'766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),
'wiki.rue': ('wiki.rue.npz',
'9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'),
'wiki.ru': ('wiki.ru.npz',
'd59d099481c22d5592ab9635c9ee48060aa0bf45'),
'wiki.rw': ('wiki.rw.npz',
'e99ee87d249f6c157c5c97397d1025d798b85c69'),
'wiki.sah': ('wiki.sah.npz',
'85dae39097b29bc8e2b64f343a77794e4a62f91a'),
'wiki.sa': ('wiki.sa.npz',
'7d1928d7c67400045ac1b35a37a0e3089690d875'),
'wiki.scn': ('wiki.scn.npz',
'27d7b8050bbeed8ce196061c610216760b053c39'),
'wiki.sc': ('wiki.sc.npz',
'69c7b8be0f03a1bbd615695f93bdd78f96a58e16'),
'wiki.sco': ('wiki.sco.npz',
'4880282f59d3338b67fbff75359e2d24896e95bb'),
'wiki.sd': ('wiki.sd.npz',
'0ed8da4d27223db717a612cf0c88582351db6e19'),
'wiki.se': ('wiki.se.npz',
'0f4b2e060d5e29f96ca73aab29c967e79db69c17'),
'wiki.sg': ('wiki.sg.npz',
'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'),
'wiki.sh': ('wiki.sh.npz',
'c13f1e94676bc939560193f7aa7ffd7d604707b3'),
'wiki.simple': ('wiki.simple.npz',
'352d0575e7d60b08e1dfce2c5de713906f0ed78f'),
'wiki.si': ('wiki.si.npz',
'204f9ffbe7770a9f56d3b2fb26999165015f5c33'),
'wiki.sk': ('wiki.sk.npz',
'7a9820b5a343b242660bf2595d1ecbf6e00a76d6'),
'wiki.sl': ('wiki.sl.npz',
'85f3186f26d6725317a64e290363a7251b928b81'),
'wiki.sm': ('wiki.sm.npz',
'9e13452cc4bff677f4f15db04f9d2f95f6ec054c'),
'wiki.sn': ('wiki.sn.npz',
'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'),
'wiki.so': ('wiki.so.npz',
'0f5d71b95768b33fd939a870c15344c4478364a9'),
'wiki.sq': ('wiki.sq.npz',
'8b05826df8575e65c87a2fc0b7630cf644d4216d'),
'wiki.srn': ('wiki.srn.npz',
'2711396ef297ac5dde8904508bc002bdecbcc6f4'),
'wiki.sr': ('wiki.sr.npz',
'546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'),
'wiki.ss': ('wiki.ss.npz',
'2e5911bad79bb5270a64f587e326d31c95ec58f3'),
'wiki.st': ('wiki.st.npz',
'23bc954719a2962e891f02efaea754c9ea025894'),
'wiki.stq': ('wiki.stq.npz',
'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'),
'wiki.su': ('wiki.su.npz',
'7e48732e8a1fcf212e692924a4416a6ac3b3b055'),
'wiki.sv': ('wiki.sv.npz',
'b9ec52e9423688f195f3145c243226c0e0b51e83'),
'wiki.sw': ('wiki.sw.npz',
'5262f0c645322b10eca73f792a970f10b2719e55'),
'wiki.szl': ('wiki.szl.npz',
'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'),
'wiki.ta': ('wiki.ta.npz',
'da7c5bc6e1142306ff2669bf1739832beb6c1763'),
'wiki.tcy': ('wiki.tcy.npz',
'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'),
'wiki.te': ('wiki.te.npz',
'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'),
'wiki.tet': ('wiki.tet.npz',
'11e46a893af55344dbe102d530fdfea5d949d3bc'),
'wiki.tg': ('wiki.tg.npz',
'da66abb72ec9ccc602713161e544963d59cc51d7'),
'wiki.th': ('wiki.th.npz',
'25e54bf2d305779ec9baa5f344410bd75c7702fc'),
'wiki.ti': ('wiki.ti.npz',
'1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'),
'wiki.tk': ('wiki.tk.npz',
'34c714fa8275fd6abfe86b2d144a043774552a6c'),
'wiki.tl': ('wiki.tl.npz',
'7d7f8a0485155bce7a74a1d778824375b0029f53'),
'wiki.tn': ('wiki.tn.npz',
'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'),
'wiki.to': ('wiki.to.npz',
'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'),
'wiki.tpi': ('wiki.tpi.npz',
'448cef043fa4b7f97825dbf8ee205ef05543bcac'),
'wiki.tr': ('wiki.tr.npz',
'c9830607a4c5134c6191006f1d80bae0ec798fe6'),
'wiki.ts': ('wiki.ts.npz',
'84a0598803712c8a713943447ddb73fc0f39af43'),
'wiki.tt': ('wiki.tt.npz',
'82c29df18f33e6284af3e977a6dda7e132a7a225'),
'wiki.tum': ('wiki.tum.npz',
'358990b894a3fb09d70674465952d828c9b0eda7'),
'wiki.tw': ('wiki.tw.npz',
'1e6d2838a4f271c1808795fb929cfcbf95094d93'),
'wiki.ty': ('wiki.ty.npz',
'e41ca5192d8cb515b3561c8d6935b150deb027b7'),
'wiki.tyv': ('wiki.tyv.npz',
'ce062ed32e854604714b65698ae290c99ba28060'),
'wiki.udm': ('wiki.udm.npz',
'9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'),
'wiki.ug': ('wiki.ug.npz',
'656503e54063e200980e39f00fc011395bcd8551'),
'wiki.uk': ('wiki.uk.npz',
'352b7ee24d9fc6513fff4fe13bc04086c680834a'),
'wiki.ur': ('wiki.ur.npz',
'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'),
'wiki.uz': ('wiki.uz.npz',
'd60d1e67bb8574dd71c18c88114aba674fc1eecb'),
'wiki.ve': ('wiki.ve.npz',
'5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'),
'wiki.vep': ('wiki.vep.npz',
'7a94355754fbe56802242c0bf9d7a27335095552'),
'wiki.vi': ('wiki.vi.npz',
'f118039eb16a4ca3347b6b171eac41113350a041'),
'wiki.vls': ('wiki.vls.npz',
'9a46a2fdc6448aa54f212081643745499ea7d05c'),
'wiki.vo': ('wiki.vo.npz',
'8e2f93c85ac608bcc4ae14093b9ff016061378fb'),
'wiki.wa': ('wiki.wa.npz',
'907074f7743d30cdbb2c48d0c8b4040796ea4164'),
'wiki.war': ('wiki.war.npz',
'928fb410c394b9c18d875326b6a3e750e2611e1b'),
'wiki.wo': ('wiki.wo.npz',
'7bb352be44f7261aa926f49b13e77df30f29312f'),
'wiki.wuu': ('wiki.wuu.npz',
'0d1dc7b05867ff2156a1180ad3da3b4697924e59'),
'wiki.xal': ('wiki.xal.npz',
'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'),
'wiki.xh': ('wiki.xh.npz',
'c64e1d2e77d1c744a628e2bd7353284616e48bea'),
'wiki.xmf': ('wiki.xmf.npz',
'160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'),
'wiki.yi': ('wiki.yi.npz',
'0662542cee29f3392fc905004ac6443b32c1477c'),
'wiki.yo': ('wiki.yo.npz',
'5d12d3b902a1fa19d8548295c3802c0608afa5c8'),
'wiki.za': ('wiki.za.npz',
'536348ff89df62e968739b567a1245bfd4112fbe'),
'wiki.zea': ('wiki.zea.npz',
'61fa192289a7c0f73ffa8035632a38b91c31c224'),
'wiki.zh_classical': ('wiki.zh_classical.npz',
'9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'),
'wiki.zh_min_nan': ('wiki.zh_min_nan.npz',
'5d38bc025c82af578299d60f7df7b399de6ed81a'),
'wiki.zh': ('wiki.zh.npz',
'94007fcf3b105bf2c21b84a3a22bdb7946e74804'),
'wiki.zh_yue': ('wiki.zh_yue.npz',
'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'),
'wiki.zu': ('wiki.zu.npz',
'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}
GOOGLEANALOGY_CATEGORIES = [
'capital-common-countries', 'capital-world', 'currency', 'city-in-state',
'family', 'gram1-adjective-to-adverb', 'gram2-opposite',
'gram3-comparative', 'gram4-superlative', 'gram5-present-participle',
'gram6-nationality-adjective', 'gram7-past-tense', 'gram8-plural',
'gram9-plural-verbs'
]
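
# Illustrative sketch (not part of the original module): the Google analogy
# categories split into semantic relations and syntactic ones (the latter
# prefixed with 'gram'), a distinction analogy benchmarks commonly report
# separately. The two derived names are hypothetical.
_GOOGLEANALOGY_SYNTACTIC = [c for c in GOOGLEANALOGY_CATEGORIES
                            if c.startswith('gram')]
_GOOGLEANALOGY_SEMANTIC = [c for c in GOOGLEANALOGY_CATEGORIES
                           if not c.startswith('gram')]
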
BATS_CHECKSUMS = \
{'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',
'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':
'44dbc56432b79ff5ce2ef80b6840a8aa916524f9',
'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':
'dc530918e98b467b8102a7dab772a66d3db32a73',
'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':
'6c6fdfb6c733bc9b298d95013765163f42faf6fb',
'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':
'39fa47ec7238ddb3f9818bc586f23f55b55418d8',
'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':
'8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',
'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':
'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',
'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':
'5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',
'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':
'377777c1e793c638e72c010228156d01f916708e',
'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':
'051c0c3c633e10900f827991dac14cf76da7f022',
'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':
'5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',
'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':
'80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',
'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':
'223e120bd61b3116298a253f392654c15ad5a39a',
'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':
'a56f8685af489bcd09c36f864eba1657ce0a7c28',
'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':
'5da99b1f1781ecfb4a1a7448c715abf07451917b',
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':
'4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',
'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':
'a6218162bc257d98e875fc667c23edfac59e19fd',
'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':
'9a4236c3bbc23903e101a42fb5ad6e15e552fadf',
'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':
'3ab0153926d5cf890cf08a4077da6d9946133874',
'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':
'2a012b87a9a60e128e064c5fe24b60f99e16ddce',
'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':
'9890315d3c4e6a38b8ae5fc441858564be3d3dc4',
'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':
'ef08a00e8ff7802811ace8f00fabac41b5d03678',
'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':
'754957101c93a25b438785bd4458404cd9010259',
'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':
'71a6562c34fb6154992a7c3e499375fcc3529c96',
'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':
'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',
'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':
'12d5b51c7b76b9136eadc719abc8cf4806c67b73',
'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':
'91991b007a35f45bd42bd7d0d465c6f8311df911',
'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':
'e5af11e216db392986ba0cbb597d861066c29adb',
'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':
'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',
'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':
'247a588671bc1da8f615e14076bd42573d24b4b3',
'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':
'4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',
'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':
'83d5ecad78d9de28fd70347731c7ee5918ba43c9',
'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':
'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',
'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':
'c081e1104e1b40725063f4b39d13d1ec12496bfd',
'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':
'bcbf05f3be76cef990a74674a9999a0bb9790a07',
'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':
'2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',
'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':
'8fa287860b096bef004fe0f6557e4f686e3da81a',
'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':
'a17c591961bddefd97ae5df71f9d1559ce7900f4',
'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':
'117fbb86504c192b33a5469f2f282e741d9c016d',
'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':
'3cde2f2c2a0606777b8d7d11d099f316416a7224'}
BATS_CATEGORIES = {
'I01': '[noun - plural_reg]',
'I02': '[noun - plural_irreg]',
'I03': '[adj - comparative]',
'I04': '[adj - superlative]',
'I05': '[verb_inf - 3pSg]',
'I06': '[verb_inf - Ving]',
'I07': '[verb_inf - Ved]',
'I08': '[verb_Ving - 3pSg]',
'I09': '[verb_Ving - Ved]',
'I10': '[verb_3pSg - Ved]',
'D01': '[noun+less_reg]',
'D02': '[un+adj_reg]',
'D03': '[adj+ly_reg]',
'D04': '[over+adj_reg]',
'D05': '[adj+ness_reg]',
'D06': '[re+verb_reg]',
'D07': '[verb+able_reg]',
'D08': '[verb+er_irreg]',
'D09': '[verb+tion_irreg]',
'D10': '[verb+ment_irreg]',
'E01': '[country - capital]',
'E02': '[country - language]',
'E03': '[UK_city - county]',
'E04': '[name - nationality]',
'E05': '[name - occupation]',
'E06': '[animal - young]',
'E07': '[animal - sound]',
'E08': '[animal - shelter]',
'E09': '[things - color]',
'E10': '[male - female]',
'L01': '[hypernyms - animals]',
'L02': '[hypernyms - misc]',
'L03': '[hyponyms - misc]',
'L04': '[meronyms - substance]',
'L05': '[meronyms - member]',
'L06': '[meronyms - part]',
'L07': '[synonyms - intensity]',
'L08': '[synonyms - exact]',
'L09': '[antonyms - gradable]',
'L10': '[antonyms - binary]'
}
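
# Illustrative sketch (not part of the original module): a BATS_CHECKSUMS key
# such as 'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt'
# maps back to its BATS_CATEGORIES code via the token before the first space
# in the file name. The helper name `_bats_category_of` is hypothetical.
def _bats_category_of(checksum_key):
    """Return the BATS category code (e.g. 'I01') for a BATS_CHECKSUMS key."""
    filename = checksum_key.rsplit('/', 1)[-1]
    return filename.split(' ', 1)[0]
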
SEMEVAL17_CHECKSUMS = \
{'SemEval17-Task2/README.txt':
'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',
'SemEval17-Task2/task2-scorer.jar':
'145ef73ce955656d59e3b67b41f8152e8ee018d8',
'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':
'6fc840f989d2274509549e472a68fb88dd2e149f',
'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':
'05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',
'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':
'552904b5988f9951311290ca8fa0441dd4351d4b',
'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':
'29d5970feac5982961bd6ab621ba31f83d3bff77',
'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':
'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',
'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':
'c51463460495a242cc726d41713c5e00b66fdd18',
'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':
'2d2bb2ed41308cc60e7953cc9036f7dc89141b48',
'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':
'a5842ff17fe3847d15414924826a8eb236018bcc',
'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':
'717bbe035d8ae2bad59416eb3dd4feb7238b97d4',
'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':
'a342b950109c73afdc86a7829e17c1d8f7c482f0',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':
'ef92b1375762f68c700e050d214d3241ccde2319',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':
'17aa103981f3193960309bb9b4cc151acaf8136c',
'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':
'eced15e8565689dd67605a82a782d19ee846222a',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':
'5cb69370a46385a7a3d37cdf2018744be77203a0',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':
'402f7fed52b60e915fb1be49f935395488cf7a7b',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':
'9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',
'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':
'd3b37aac79ca10311352309ef9b172f686ecbb80',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':
'a2959aec346c26475a4a6ad4d950ee0545f2381e',
'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':
'ca627c30143d9f82a37a8776fabf2cee226dd35c',
'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':
'a03d79a6ce7b798356b53b4e85dbe828247b97ef',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':
'7564130011d38daad582b83135010a2a58796df6',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':
'c9e23c2e5e970e7f95550fbac3362d85b82cc569',
'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':
'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':
'428dfdad2a144642c13c24b845e6b7de6bf5f663',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':
'1dd7ab08a10552486299151cdd32ed19b56db682',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':
'17451ac2165aa9b695dae9b1aba20eb8609fb400',
'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':
'5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':
'8c09a219670dc32ab3864078bf0c28a287accabc',
'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':
'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',
'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':
'e0b560bb1d2db39ce45e841c8aad611734dc94f1',
'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':
'dd071fd90f59bec8d271a447d86ee2e462941f52',
'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':
'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',
'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':
'8956c78ff9ceae1d923a57816e55392c6a7dfc49',
'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':
'2f7c4247cde0d918b3508e90f6b49a1f5031c81b',
'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':
'c11e0b5b55f94fc97c7b11fa455e71b071be879f',
'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':
'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',
'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':
'693cb5928e807c79e39136dc0981dadca7832ae6',
'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':
'8241ca66bf5ba55f77607e9bcfae8e34902715d8',
'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':
'd30701a93c8c5500b82ac2334ed8410f9a23864b',
'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':
'bad225573e1216ba8b35429e9fa520a20e8ce031',
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt':
'f85cba9f6690d61736623c16e620826b09384aa5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':
'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':
'88a6f6dd1bba309f7cae7281405e37f442782983',
'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':
'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':
'128d1a460fe9836b66f0fcdf59455b02edb9f258',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':
'508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':
'1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',
'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':
'141c83d591b0292016583d9c23a2cc5514a006aa',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':
'a0a548cd698c389ee80c34d6ec72abed5f1625e5',
'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':
'8d42bed8a43ff93d26ca95794758d9392ca707ed',
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':
'9c85223f1f734de61c28157df0ce417bb0537803',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':
'126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':
'1db6201c2c8f19744c39dbde8bd4a803859d64c1',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':
'5300bf2ead163ff3981fb41ec5d0e291c287c9e0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':
'd4f5205de929bb0c4020e1502a3f2204b5accd51',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':
'3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':
'c14de7bf326907336a02d499c9b92ab229f3f4f8',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':
'3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':
'359f69e9dfd6411a936baa3392b8f05c398a7707',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':
'44090607fabe5a26926a384e521ef1317f6f00d0',
'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':
'97b09ffa11803023c2143fd4a4ac4bbc9775e645',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt':
'b71166d8615e921ee689cefc81419398d341167f',
'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt':
'a0735361a692be357963959728dacef85ea08240'}
UD21_DATA_FILE_SHA1 = \
{'af': {'dev': ('af-ud-dev.conllu',
'e37b104f4425ee00afc81779201816d5ac525194'),
'test': ('af-ud-test.conllu',
'd2bf02370d308ee957c04242bd0871db0e488389'),
'train': ('af-ud-train.conllu',
'a652c7b19c236063d3ea489947f83095893b699a')},
'grc_proiel': {'dev': ('grc_proiel-ud-dev.conllu',
'd199530c7e40ff0214e510957bb126af0dc12c1c'),
'test': ('grc_proiel-ud-test.conllu',
'bb7825ddeb18fc2d86638e4725f04563f3e08aab'),
'train': ('grc_proiel-ud-train.conllu',
'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')},
'grc': {'dev': ('grc-ud-dev.conllu',
'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),
'test': ('grc-ud-test.conllu',
'f19accf31db95e2c736d716d3438c09aa877eb07'),
'train': ('grc-ud-train.conllu',
'e98d3eabea67787c5d43a498f5a0fa4246f38104')},
'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',
'b740de9bd68e68b30b9b313eb050d44e94470ca5'),
'test': ('ar_nyuad-ud-test.conllu',
'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),
'train': ('ar_nyuad-ud-train.conllu',
'd065f03958fd8782a7431b6778c6665ad09444a6')},
'ar_pud': {'test': ('ar_pud-ud-test.conllu',
'2161701e6726b6feb14733a312fba6160b9eb722')},
'ar': {'dev': ('ar-ud-dev.conllu',
'5f8964974d5ba5eb3504cdafb93c34c473c4177c'),
'test': ('ar-ud-test.conllu',
'58df161047f310cc3bb4d0e615ca33466e630bb9'),
'train': ('ar-ud-train.conllu',
'0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')},
'eu': {'dev': ('eu-ud-dev.conllu',
'3ee15b5ed46ec93d7278c8cc0351d242417d553d'),
'test': ('eu-ud-test.conllu',
'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),
'train': ('eu-ud-train.conllu',
'd56ec997916e38ee6ab1badd78c119e81e4797c9')},
'be': {'dev': ('be-ud-dev.conllu',
'015473e91cf8937c46e8b721f206415abac16a35'),
'test': ('be-ud-test.conllu',
'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),
'train': ('be-ud-train.conllu',
'26b871e28d2f356a709f106b6e3e86b417ba74e7')},
'bg': {'dev': ('bg-ud-dev.conllu',
'0a2284b10547681eb65691eb2a9f0f1662e16e90'),
'test': ('bg-ud-test.conllu',
'75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),
'train': ('bg-ud-train.conllu',
'd4b2fa267010c4486885c91f3af65ff66c8be94c')},
'bxr': {'sample': ('bxr-ud-sample.conllu',
'9239bdd251a60820c71111ec54de9e7d58a8579d'),
'test': ('bxr-ud-test.conllu',
'0a06e527454ae0b547153222f67eb5db94e528fd')},
'yue': {'test': ('yue-ud-test.conllu',
'd91477c65aa75cd45489cca13f7a122066972bdb')},
'ca': {'dev': ('ca-ud-dev.conllu',
'5737824f0afff0d07a43db331f102d62c6da2d96'),
'test': ('ca-ud-test.conllu',
'0e28bd2a3b982515c1158194ad52bcbbe741e170'),
'train': ('ca-ud-train.conllu',
'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')},
'zh_cfl': {'test': ('zh_cfl-ud-test.conllu',
'32fe45cd0e4e11ced95202971bce74acbc6a8c30')},
'zh_hk': {'test': ('zh_hk-ud-test.conllu',
'4c75fa5bbcdcb181447b4e037224d50feb2776fb')},
'zh_pud': {'test': ('zh_pud-ud-test.conllu',
'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},
'zh': {'dev': ('zh-ud-dev.conllu',
'34d8253b35ad2245d59ddffa71b5689ef267b6b2'),
'test': ('zh-ud-test.conllu',
'0f00516097650c12262298dd0fbd1b17a6d2bfe2'),
'train': ('zh-ud-train.conllu',
'9444eec5f4561f289ad140e47e49013689512a65')},
'cop': {'dev': ('cop-ud-dev.conllu',
'863d1004df1a92df52515105f6fae6ff68539595'),
'test': ('cop-ud-test.conllu',
'd3b33566679f071d4ad622ad840cd98381835706'),
'train': ('cop-ud-train.conllu',
'33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},
'hr': {'dev': ('hr-ud-dev.conllu',
'8da2a419980807d2e91e09b6bf496e58d442b0ba'),
'test': ('hr-ud-test.conllu',
'49d673cba3d32d39d413e557276a45a0214ed83e'),
'train': ('hr-ud-train.conllu',
'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},
'cs_cac': {'dev': ('cs_cac-ud-dev.conllu',
'69dfed28c29146b41a3428f4715bde70a6aecf00'),
'test': ('cs_cac-ud-test.conllu',
'a994b33ebbde486c1818a9df460fb112055e95de'),
'train': ('cs_cac-ud-train.conllu',
'694f8559471dc481612606bf5df078daa094a84e')},
'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',
'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),
'test': ('cs_cltt-ud-test.conllu',
'a8f6696785e658471f759bc736b738a105cba9a3'),
'train': ('cs_cltt-ud-train.conllu',
'ab97886066bfa462e5da03d25f802489292c0b56')},
'cs_fictree': {'dev': ('cs_fictree-ud-dev.conllu',
'dc67c07737a3a8bf2633068941f2d55f1500e192'),
'test': ('cs_fictree-ud-test.conllu',
'06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),
'train': ('cs_fictree-ud-train.conllu',
'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},
'cs_pud': {'test': ('cs_pud-ud-test.conllu',
'9f205677041de694157ba2ef3e1eadb44d467f2f')},
'cs': {'dev': ('cs-ud-dev.conllu',
'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),
'test': ('cs-ud-test.conllu',
'34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),
'train': ('cs-ud-train.conllu',
'd1f855798a29d433b580d01ade0d8d062cd58534')},
'da': {'dev': ('da-ud-dev.conllu',
'2c0c798c20a2efb30273172d388342a82bb0ce3c'),
'test': ('da-ud-test.conllu',
'85a95a8527f8773f1575ceaf0ab51f204b211047'),
'train': ('da-ud-train.conllu',
'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},
'nl_lassysmall': {'dev': ('nl_lassysmall-ud-dev.conllu',
'2a169af74c2206c9073c3932b4a300492a314ee5'),
'test': ('nl_lassysmall-ud-test.conllu',
'39f08896a40ad370f2acc37d58689cdc43a660a9'),
'train': ('nl_lassysmall-ud-train.conllu',
'e4fd6bac246c81bb17a3c932e251b8662739cc19')},
'nl': {'dev': ('nl-ud-dev.conllu',
'33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),
'test': ('nl-ud-test.conllu',
'01b3e1048792c851fdd59882c353fcdb76dc165e'),
'train': ('nl-ud-train.conllu',
'8e6a10152b7d09ce61433dd5f715ab2401611cf6')},
'en_lines': {'dev': ('en_lines-ud-dev.conllu',
'83b63b7670ea4394b558bc26e16a004339f0a0ef'),
'test': ('en_lines-ud-test.conllu',
'ccc9d3c71a873313d138c3adb12405a97eb270d8'),
'train': ('en_lines-ud-train.conllu',
'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},
'en_pud': {'test': ('en_pud-ud-test.conllu',
'4a9c83ba058a7e51979af790ba0440cc274b948f')},
'en_partut': {'dev': ('en_partut-ud-dev.conllu',
'863a6f571158acaaca95223e50bd08fc0c1134f0'),
'test': ('en_partut-ud-test.conllu',
'0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),
'train': ('en_partut-ud-train.conllu',
'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},
'en': {'dev': ('en-ud-dev.conllu',
'e2159dda4400d289ad8a403b466c8d23d733ba35'),
'test': ('en-ud-test.conllu',
'bd36ef23f76155625b379d063427bd62f19b7658'),
'train': ('en-ud-train.conllu',
'993c44f62104971fe2d056847349facbb7986258')},
'et': {'dev': ('et-ud-dev.conllu',
'312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),
'test': ('et-ud-test.conllu',
'd70907f0771b41a27406672b9d91043a0954f946'),
'train': ('et-ud-train.conllu',
'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},
'fi_ftb': {'dev': ('fi_ftb-ud-dev.conllu',
'552ec574acdb3209e7545af4e16a43a1e2956979'),
'test': ('fi_ftb-ud-test.conllu',
'13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),
'train': ('fi_ftb-ud-train.conllu',
'73d025250bfc82a24181b5ed601dc4ae7c8e846c')},
'fi_pud': {'test': ('fi_pud-ud-test.conllu',
'4ab7b0d99ce6697d79732e401be97585a28c2afa')},
'fi': {'dev': ('fi-ud-dev.conllu',
'e023cf7eaffbda20bd4518d87fe9086207bb5361'),
'test': ('fi-ud-test.conllu',
'fd57c5106e43994250f4472890572bdbb8b4a48b'),
'train': ('fi-ud-train.conllu',
'ab27bda8cbb62886196b78de87985a4c6cf8215d')},
'fr_ftb': {'dev': ('fr_ftb-ud-dev.conllu',
'71b3cc02601f64711f98e33a6b2af10aa00700be'),
'test': ('fr_ftb-ud-test.conllu',
'723b8c44e74202a18b7e71268b738a5e1aa15f86'),
'train': ('fr_ftb-ud-train.conllu',
'9a347120478254647deb7c7e02871b28aad23ec4')},
'fr_pud': {'test': ('fr_pud-ud-test.conllu',
'570b7e31dc359ed62123bea6546efa13cfc2cf25')},
'fr_partut': {'dev': ('fr_partut-ud-dev.conllu',
'1505030048829a8dccc466cc86bca057996301ae'),
'test': ('fr_partut-ud-test.conllu',
'f6446317c9f82cc0b70a76be75282804a3359ac0'),
'train': ('fr_partut-ud-train.conllu',
'f87c246cfa91186b90c7780cb64783034f196622')},
'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',
'859b10d80c7b3a382571cce9b2620039673539d1'),
'test': ('fr_sequoia-ud-test.conllu',
'be0ef69e392e64030414748da2995433f23e033d'),
'train': ('fr_sequoia-ud-train.conllu',
'48ac01913518888a32670a687123ed1bac57e0e9')},
'fr': {'dev': ('fr-ud-dev.conllu',
'5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),
'test': ('fr-ud-test.conllu',
'd20a014acd38193155a33a5233c13f89541c78c3'),
'train': ('fr-ud-train.conllu',
'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},
'gl_treegal': {'dev': ('gl_treegal-ud-dev.conllu',
'272558614cff4a5e1f2805626904e6dc488b8d25'),
'test': ('gl_treegal-ud-test.conllu',
'18d99474d3aa9c83878c42a79d7881330dd9b861'),
'train': ('gl_treegal-ud-train.conllu',
'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},
'gl': {'dev': ('gl-ud-dev.conllu',
'e72390dce9bf973442deef31ed0cd7a975361fe5'),
'test': ('gl-ud-test.conllu',
'7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),
'train': ('gl-ud-train.conllu',
'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},
'de_pud': {'test': ('de_pud-ud-test.conllu',
'2c91e42b7345145290b68385ff5270910048b8c4')},
'de': {'dev': ('de-ud-dev.conllu',
'9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),
'test': ('de-ud-test.conllu',
'48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),
'train': ('de-ud-train.conllu',
'04a1d6a6a2da9d9c38496118e0432c9a6720db64')},
'got': {'dev': ('got-ud-dev.conllu',
'501c47193ca2af5826e4afcc04941df87a7c47c3'),
'test': ('got-ud-test.conllu',
'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),
'train': ('got-ud-train.conllu',
'b4951ede89d947c6617df782ac248566235f78fb')},
'el': {'dev': ('el-ud-dev.conllu',
'9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),
'test': ('el-ud-test.conllu',
'1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),
'train': ('el-ud-train.conllu',
'32f4abc821624c4cd4d3b3b555c1558f06366e2c')},
'he': {'dev': ('he-ud-dev.conllu',
'c5b76874fcf11c7733e1555957bb49e8298af140'),
'test': ('he-ud-test.conllu',
'4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),
'train': ('he-ud-train.conllu',
'eae49a515b38d224b109138bf006a112e80a7caf')},
'hi_pud': {'test': ('hi_pud-ud-test.conllu',
'd237fecc594186e7a52ad33313ac52e927905d73')},
'hi': {'dev': ('hi-ud-dev.conllu',
'48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),
'test': ('hi-ud-test.conllu',
'004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),
'train': ('hi-ud-train.conllu',
'9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},
'hu': {'dev': ('hu-ud-dev.conllu',
'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),
'test': ('hu-ud-test.conllu',
'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),
'train': ('hu-ud-train.conllu',
'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},
'id': {'dev': ('id-ud-dev.conllu',
'7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),
'test': ('id-ud-test.conllu',
'357ed8c216725760bf5be561ed6e918ce602b5ac'),
'train': ('id-ud-train.conllu',
'328ea588b75de55ef48373c2bf9983bca277d724')},
'ga': {'dev': ('ga-ud-dev.conllu',
'180a1a9dcfcec6528a559032c67e9a15693a039d'),
'test': ('ga-ud-test.conllu',
'b74a56372af3f68f089ea82ba858e5a82aae4e22'),
'train': ('ga-ud-train.conllu',
'40df0b12fbadae6e56c0a01da483d6c612d9450c')},
'it_pud': {'test': ('it_pud-ud-test.conllu',
'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},
'it_partut': {'dev': ('it_partut-ud-dev.conllu',
'0bb5dc0c0815212c9832eaef3b802cf885e0543b'),
'test': ('it_partut-ud-test.conllu',
'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),
'train': ('it_partut-ud-train.conllu',
'784b18bf8d3b59d967d147075a3cb5b03fb28637')},
'it_postwita': {'dev': ('it_postwita-ud-dev.conllu',
'07f6f658246aa070e2166e688f7569d61aafff54'),
'test': ('it_postwita-ud-test.conllu',
'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),
'train': ('it_postwita-ud-train.conllu',
'69684c47fba99230f6ef1a204b95c37d28eaa5a6')},
'it': {'dev': ('it-ud-dev.conllu',
'ea8fd59f36280fbd77b9a807959491636048a698'),
'test': ('it-ud-test.conllu',
'34839fdeeef883f8034c723a18772947106cec6b'),
'train': ('it-ud-train.conllu',
'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},
'ja_pud': {'test': ('ja_pud-ud-test.conllu',
'4c914016a0968ca434348370d38c9579a60e8fd7')},
'ja': {'dev': ('ja-ud-dev.conllu',
'21f06fef7fbeccd05a298385bf40f8b4ffe95146'),
'test': ('ja-ud-test.conllu',
'240d3532698356a7c6f93c3215718ef2f66a672f'),
'train': ('ja-ud-train.conllu',
'35eaf307d94c2006241fe08f745d7b1b17f049cf')},
'kk': {'dev': ('kk-ud-dev.conllu',
'038033c822b407040a4ecb87c077506cd0d1a322'),
'test': ('kk-ud-test.conllu',
'4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),
'train': ('kk-ud-train.conllu',
'48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},
'ko': {'dev': ('ko-ud-dev.conllu',
'60e7da7cca44c923873a062e80262726659f5528'),
'test': ('ko-ud-test.conllu',
'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),
'train': ('ko-ud-train.conllu',
'ee21328f9ea39668e802f0cb6a794358f5c256bf')},
'kmr': {'sample': ('kmr-ud-sample.conllu',
'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),
'test': ('kmr-ud-test.conllu',
'606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},
'la_ittb': {'dev': ('la_ittb-ud-dev.conllu',
'd9f17992bd0258a734aea9b6c53759039717c86a'),
'test': ('la_ittb-ud-test.conllu',
'f4d097d076083240c48594d4cb058840ff16be8e'),
'train': ('la_ittb-ud-train.conllu',
'627d5b30b20655efab194c75fc9219b0aa2cf4b6')},
'la_proiel': {'dev': ('la_proiel-ud-dev.conllu',
'9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),
'test': ('la_proiel-ud-test.conllu',
'697dbeae38507856a4fafa8506dfc8db5e8e4054'),
'train': ('la_proiel-ud-train.conllu',
'5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},
'la': {'dev': ('la-ud-dev.conllu',
'2748bb0479cb599e1a007d1d1634d5870b45549b'),
'test': ('la-ud-test.conllu',
'19c62c64ce41a650e9b55a345c61e7c0d994816e'),
'train': ('la-ud-train.conllu',
'183ce6f58b0305e5926161e29b9a6aacc424662c')},
'lv': {'dev': ('lv-ud-dev.conllu',
'6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),
'test': ('lv-ud-test.conllu',
'9f7806a24656db0e859efe041a88926b220b8e28'),
'train': ('lv-ud-train.conllu',
'f1eeff608e8f27d92b683ae041591355198841eb')},
'lt': {'dev': ('lt-ud-dev.conllu',
'0b8dc19005571fa7b66d8302b797d51a241f128b'),
'test': ('lt-ud-test.conllu',
'def54d6caf97610eb4ca8c0179d661c8eab98951'),
'train': ('lt-ud-train.conllu',
'13fe42a3d21f17a5cad5aaf38692619c7713e177')},
'mr': {'dev': ('mr-ud-dev.conllu',
'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),
'test': ('mr-ud-test.conllu',
'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),
'train': ('mr-ud-train.conllu',
'24a1370184054a7f5af647997dca783d6c571242')},
'sme': {'sample': ('sme-ud-sample.conllu',
'8c456f06b363c4d273fc454a49505f783f00fe43'),
'test': ('sme-ud-test.conllu',
'6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),
'train': ('sme-ud-train.conllu',
'203eab4183fd585efe3fea7e6df493a6746b0a9f')},
'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',
'3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),
'test': ('no_bokmaal-ud-test.conllu',
'18336ef0e4877ae28eb7d6019afe05b5a53245d5'),
'train': ('no_bokmaal-ud-train.conllu',
'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},
'no_nynorsk': {'dev': ('no_nynorsk-ud-dev.conllu',
'5b95a070d11a61a23fc340ecbbbbb70f86884498'),
'test': ('no_nynorsk-ud-test.conllu',
'3eaab8e4af82de2333521e9be0954ffaf6b1440b'),
'train': ('no_nynorsk-ud-train.conllu',
'79319993097c30ddf28d4c1137b8662f4f35d17e')},
'no_nynorsklia': {'dev': ('no_nynorsklia-ud-dev.conllu',
'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),
'test': ('no_nynorsklia-ud-test.conllu',
'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},
'cu': {'dev': ('cu-ud-dev.conllu',
'0b67035ed5ca52aeefae443611232ed202fb990a'),
'test': ('cu-ud-test.conllu',
'0fed872a5a2480b601c67ebbecf8dcd680b6863b'),
'train': ('cu-ud-train.conllu',
'1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},
'fa': {'dev': ('fa-ud-dev.conllu',
'098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),
'test': ('fa-ud-test.conllu',
'0024aa6bad5eceed2e36f77d88578304a5886a80'),
'train': ('fa-ud-train.conllu',
'1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},
'pl': {'dev': ('pl-ud-dev.conllu',
'b7af7bee091feb0788eb9793a7102972006421dc'),
'test': ('pl-ud-test.conllu',
'e141e793ba35f8a08510ec1ce494099b5c800ca8'),
'train': ('pl-ud-train.conllu',
'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},
'pt_br': {'dev': ('pt_br-ud-dev.conllu',
'8eedc77096a87fe8ab251100d460780e161e5397'),
'test': ('pt_br-ud-test.conllu',
'37a64e3acef107b62ab62ce478fc36ed112fb58f'),
'train': ('pt_br-ud-train.conllu',
'023cafcb6959d52298ad619f7838f26db9798aa9')},
'pt_pud': {'test': ('pt_pud-ud-test.conllu',
'4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},
'pt': {'dev': ('pt-ud-dev.conllu',
'2171b4ac2b0726c9dfae6adf394b76be927accab'),
'test': ('pt-ud-test.conllu',
'9e819a4592db42905806141d6fca3b7b20396ce3'),
'train': ('pt-ud-train.conllu',
'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},
'ro_nonstandard': {'test': ('ro_nonstandard-ud-test.conllu',
'300d53091412dc5700dc5cad0fd3e136f7c8cb11'),
'train': ('ro_nonstandard-ud-train.conllu',
'ed97f51129b63857627f838f68f41c9ef8541686')},
'ro': {'dev': ('ro-ud-dev.conllu',
'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),
'test': ('ro-ud-test.conllu',
'0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),
'train': ('ro-ud-train.conllu',
'74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},
'ru_pud': {'test': ('ru_pud-ud-test.conllu',
'bca81ce7aaf3cb8add98b19faecc1d8303901631')},
'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',
'304c6ec7fb5060583af5f890384e3a480f8c3ad5'),
'test': ('ru_syntagrus-ud-test.conllu',
'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),
'train': ('ru_syntagrus-ud-train.conllu',
'8fa56fa80845e4ad946189d1e7af228b5595e312')},
'ru': {'dev': ('ru-ud-dev.conllu',
'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),
'test': ('ru-ud-test.conllu',
'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),
'train': ('ru-ud-train.conllu',
'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},
'sa': {'test': ('sa-ud-test.conllu',
'fad3a03a6834884a092b1d326625c6f663e36636')},
'sr': {'dev': ('sr-ud-dev.conllu',
'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),
'test': ('sr-ud-test.conllu',
'0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),
'train': ('sr-ud-train.conllu',
'97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},
'sk': {'dev': ('sk-ud-dev.conllu',
'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),
'test': ('sk-ud-test.conllu',
'89af4581c5f9058809f48788eb635a92cda0603c'),
'train': ('sk-ud-train.conllu',
'89e108093bbf5619578955fdadfe200cefd8cf01')},
'sl_sst': {'dev': ('sl_sst-ud-dev.conllu',
'c65ae82123af95ec11f47262546b5ab2fc5735e5'),
'test': ('sl_sst-ud-test.conllu',
'144a0124c1181b49d0c542a4a6d4465e45545f3b'),
'train': ('sl_sst-ud-train.conllu',
'4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},
'sl': {'dev': ('sl-ud-dev.conllu',
'0078572c19574d32defeae9924176da2dd701ede'),
'test': ('sl-ud-test.conllu',
'616ace00e25df99be8dd49b7bf7c48f1093df96a'),
'train': ('sl-ud-train.conllu',
'1462ac69163b30cf1399527e95f686ebf91be2d3')},
'es_ancora': {'dev': ('es_ancora-ud-dev.conllu',
'94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),
'test': ('es_ancora-ud-test.conllu',
'8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),
'train': ('es_ancora-ud-train.conllu',
'95d5bf7ad33304f3440ffb014ac094c4967c303f')},
'es_pud': {'test': ('es_pud-ud-test.conllu',
'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},
'es': {'dev': ('es-ud-dev.conllu',
'4cdb828c492c6b7707af0ab6c7fbf734f770630a'),
'test': ('es-ud-test.conllu',
'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),
'train': ('es-ud-train.conllu',
'5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},
'sv_lines': {'dev': ('sv_lines-ud-dev.conllu',
'15f1a04d960518fe7bfee23ce227fc7b78d4b755'),
'test': ('sv_lines-ud-test.conllu',
'843df4ea3ab4f551b1eaa661652a8d6489a81d41'),
'train': ('sv_lines-ud-train.conllu',
'16e3533bf174b36d728847a36a3600f16c63baa6')},
'sv_pud': {'test': ('sv_pud-ud-test.conllu',
'18dadac0c15468256b340835ebc0529facbe9b73')},
'sv': {'dev': ('sv-ud-dev.conllu',
'6d14e1aae5c9ae37c35481c44c04bf74a4233455'),
'test': ('sv-ud-test.conllu',
'7ead0f7b49508db0022c042195ac5925b611c5b7'),
'train': ('sv-ud-train.conllu',
'68affb85efde6ed017eab1e998e9666108559e04')},
'swl': {'dev': ('swl-ud-dev.conllu',
'828e0a08f12cabfa75f9dd2b53dba58606522a7c'),
'test': ('swl-ud-test.conllu',
'674f76631cf16172d67b795ff92dfbb297eb4930'),
'train': ('swl-ud-train.conllu',
'46b721f9cae2d5ba43f818dd487600b0ce76362a')},
'ta': {'dev': ('ta-ud-dev.conllu',
'4d01f555012ddc1976933d4d928e26470f71bfa1'),
'test': ('ta-ud-test.conllu',
'e8db8816a98d8b7e81188786db7c405979a7e3c3'),
'train': ('ta-ud-train.conllu',
'6753d8c7b1b016de39c087aab45056de6021c3ae')},
'te': {'dev': ('te-ud-dev.conllu',
'29f46355d767e54e8565f76a063c43e95ead0fca'),
'test': ('te-ud-test.conllu',
'50abe345d4ab5bae021cacd096266c57b00572b8'),
'train': ('te-ud-train.conllu',
'1794469abe09e7364cda0d9764cf515dcb4a61b6')},
'tr_pud': {'test': ('tr_pud-ud-test.conllu',
'aae839e2476a2f149c98e0274d245d07a50dafaa')},
'tr': {'dev': ('tr-ud-dev.conllu',
'421de4d8d0fbdda46750523bde72880414c134a3'),
'test': ('tr-ud-test.conllu',
'b175f136f6f0271c494a58a1846971c4a07cda27'),
'train': ('tr-ud-train.conllu',
'5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},
'uk': {'dev': ('uk-ud-dev.conllu',
'0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),
'test': ('uk-ud-test.conllu',
'46c88fd623894fabdafb01a826016c215e4f65cc'),
'train': ('uk-ud-train.conllu',
'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},
'hsb': {'sample': ('hsb-ud-sample.conllu',
'148eddbb19b06115ea54e17a3fca58e99a85cbd9'),
'test': ('hsb-ud-test.conllu',
'3d319288b4c06395b2627980737131995949f770')},
'ur': {'dev': ('ur-ud-dev.conllu',
'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),
'test': ('ur-ud-test.conllu',
'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),
'train': ('ur-ud-train.conllu',
'488d65b394d0de264be1221614c09e541f92f9de')},
'ug': {'dev': ('ug-ud-dev.conllu',
'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),
'test': ('ug-ud-test.conllu',
'4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},
'vi': {'dev': ('vi-ud-dev.conllu',
'1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),
'test': ('vi-ud-test.conllu',
'1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),
'train': ('vi-ud-train.conllu',
'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}
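# Usage sketch (illustrative, assuming the hypothetical `verify_sha1` helper above):
# unlike the flat checksum tables, UD21_DATA_FILE_SHA1 is nested as
# language code -> split -> (file name, SHA1). For example, to look up the
# English training split and check a downloaded copy:
#
#     fname, sha1 = UD21_DATA_FILE_SHA1['en']['train']
#     assert verify_sha1('/path/to/' + fname, sha1)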
|
flexible
|
{
"blob_id": "4dde161d25ed41154e13b94cc9640c6aac055f87",
"index": 6164,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nUNK_TOKEN = '<unk>'\nBOS_TOKEN = '<bos>'\nEOS_TOKEN = '<eos>'\nPAD_TOKEN = '<pad>'\nUNK_IDX = 0\nLARGE_POSITIVE_FLOAT = 1e+18\nLARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT\nGLOVE_NPZ_SHA1 = {'glove.42B.300d': ('glove.42B.300d.npz',\n '7deee8f4860744db53ed9e50892effe9883e6d89'), 'glove.6B.100d': (\n 'glove.6B.100d.npz', '01f80f202fcabcc3e0804898349087bfc191dd1c'),\n 'glove.6B.200d': ('glove.6B.200d.npz',\n '5e6e2bdab346c257f88d80d215d518e680d86e32'), 'glove.6B.300d': (\n 'glove.6B.300d.npz', '1db264aa936be62f055dfb72854204450bdf4399'),\n 'glove.6B.50d': ('glove.6B.50d.npz',\n 'aa16be8d184399d2199f83fd62586f2c30497bfa'), 'glove.840B.300d': (\n 'glove.840B.300d.npz', 'b4ba390c1154736e07c0e67d9180935f5930e83c'),\n 'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',\n '0f7b82c223451d0002f79ba23596983cdbe0e2b1'), 'glove.twitter.27B.200d':\n ('glove.twitter.27B.200d.npz',\n '41cc2d26f58a54622ce96bf6c8434360ab524f20'), 'glove.twitter.27B.25d': (\n 'glove.twitter.27B.25d.npz', '9f563d2f296995598cc46812b2fda05ad4c3c879'\n ), 'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',\n 'ce9959c056f2a0a780c468feeb4f823af51630e9')}\nFAST_TEXT_NPZ_SHA1 = {'crawl-300d-2M': ('crawl-300d-2M.npz',\n '9dd611a1fe280c63050cd546d3595400fc0eede4'), 'wiki.aa': ('wiki.aa.npz',\n '48f163b80eb37f1806142169d3d4c05cf75b7339'), 'wiki.ab': ('wiki.ab.npz',\n '860ceff119dd27e5b701b605879037c1310cbc3e'), 'wiki.ace': (\n 'wiki.ace.npz', '62938287464040491719f56a6f521f8f808beee8'), 'wiki.ady':\n ('wiki.ady.npz', '646843afa260d018ed711df3f1ca9c3e000447b6'), 'wiki.af':\n ('wiki.af.npz', '7b14cd27690b67fea318d0bac2283c16430680e2'), 'wiki.ak':\n ('wiki.ak.npz', '20f309adad1c45958c97b6055d5838e05bbaea72'), 'wiki.als':\n ('wiki.als.npz', 'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'), 'wiki.am':\n ('wiki.am.npz', 'ed3dd10cea64737f7a1623612ee099df9dc19f66'), 'wiki.ang':\n ('wiki.ang.npz', '8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'), 'wiki.an':\n ('wiki.an.npz', '168046283c719ab96a29b1abae2e25a6575c7be8'), 'wiki.arc':\n ('wiki.arc.npz', '049021b7decea4bc009b12936e56b4dbf5b760e7'), 'wiki.ar':\n ('wiki.ar.npz', '7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'), 'wiki.arz':\n ('wiki.arz.npz', '7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'), 'wiki.as':\n ('wiki.as.npz', '01d38c29cd4bd99c1a8534abc058822da14a5b9c'), 'wiki.ast':\n ('wiki.ast.npz', '9c9846ba5084505a0adea89c95c66e04efbf5ce9'), 'wiki.av':\n ('wiki.av.npz', '7ef6a920c364638504e673cfde5f7675503fa81e'), 'wiki.ay':\n ('wiki.ay.npz', 'c1202e110930e3902397f5cb64a8359e013b469f'), 'wiki.azb':\n ('wiki.azb.npz', '10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'), 'wiki.az':\n ('wiki.az.npz', '74257c3bcd533a606afae509ea835dc036d61546'), 'wiki.ba':\n ('wiki.ba.npz', '4a2857ed694d66864df562b376c2fa12fcb03646'), 'wiki.bar':\n ('wiki.bar.npz', 'e65c6b7e9ff83798d1eea05d166148837d53e615'),\n 'wiki.bat_smg': ('wiki.bat_smg.npz',\n '6420584ae28ba6c9dd145fea8f096243d457c2d8'), 'wiki.bcl': (\n 'wiki.bcl.npz', '33606c970ab336b678393e2bdb8af2116d11cf7b'), 'wiki.be':\n ('wiki.be.npz', '84487d341e333344cf71bc12c7a205d923762498'), 'wiki.bg':\n ('wiki.bg.npz', '56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'), 'wiki.bh':\n ('wiki.bh.npz', '07473989853a344a41aaa18f41030dc56d0d01c7'), 'wiki.bi':\n ('wiki.bi.npz', '08adfa3c9ef3016d30ef69ea539d217ff67eda09'), 'wiki.bjn':\n ('wiki.bjn.npz', '998a551283222931d3a26922308449950bfa3ec7'), 'wiki.bm':\n ('wiki.bm.npz', '454ff9fbd4790e4a076d9a2087a51da28aa1332f'), 'wiki.bn':\n ('wiki.bn.npz', '1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'), 'wiki.bo':\n 
('wiki.bo.npz', 'b9fe87318428de0a7790de175b5fec80c5af482d'), 'wiki.bpy':\n ('wiki.bpy.npz', '5c7853173d27e2c018c24eca69de8d5f34511b0d'), 'wiki.br':\n ('wiki.br.npz', '7aa66a2034fbfaa1d39e637385d48610238797c9'), 'wiki.bs':\n ('wiki.bs.npz', 'a019a4677677c2e9e4d899326b2b6c15ad6c011a'), 'wiki.bug':\n ('wiki.bug.npz', '09ae3477941d7a99d1df494368d7efb0b2c18913'),\n 'wiki.bxr': ('wiki.bxr.npz', 'b832c691b8ddd95896c052d3d15e1f98d72068d5'\n ), 'wiki.ca': ('wiki.ca.npz',\n '391e0d4daad08649251274fa1cc2a5f49c7728b1'), 'wiki.cbk_zam': (\n 'wiki.cbk_zam.npz', '02e57a763bc9f9eadaba57953383dd12a0a78a37'),\n 'wiki.cdo': ('wiki.cdo.npz', 'd6e8f422327e8b2273f1f2662d793707ece6695d'\n ), 'wiki.ceb': ('wiki.ceb.npz',\n '23bc0bb9aeaa57dff35092766941a866de142aae'), 'wiki.ce': ('wiki.ce.npz',\n '182b2a889256119a6d379d501c55c7621e5855db'), 'wiki.ch': ('wiki.ch.npz',\n '82dd77512fcb463481f43c9cef3507e2baa90d7b'), 'wiki.cho': (\n 'wiki.cho.npz', 'b0b620fc2442d1a6e2440e71a424861c80175f0c'), 'wiki.chr':\n ('wiki.chr.npz', '3d62c6b95c5af46abd6234426ae760cca65d5bd0'),\n 'wiki.chy': ('wiki.chy.npz', '34a28a22da79aebc100e3714b825c95c8d5f54a3'\n ), 'wiki.ckb': ('wiki.ckb.npz',\n 'ad19461e4be583d08b7693ff5b1e9d590ed41add'), 'wiki.co': ('wiki.co.npz',\n 'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'), 'wiki.crh': (\n 'wiki.crh.npz', '540270ba6edd9d7b2f7efca52b3b407524ac67d1'), 'wiki.cr':\n ('wiki.cr.npz', 'f06b77465a38ec960d7d5a7554b848c37e945c76'), 'wiki.csb':\n ('wiki.csb.npz', 'b8b28559cf2541341af98e2aa755856765bdeabf'), 'wiki.cs':\n ('wiki.cs.npz', '19881e931fe06abf341450f00c342d364313e232'), 'wiki.cu':\n ('wiki.cu.npz', '731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'), 'wiki.cv':\n ('wiki.cv.npz', 'e60034fcffb7dfef7b236ddba1194c3aa20b7967'), 'wiki.cy':\n ('wiki.cy.npz', '5a0fb967b5556f007c0d5065f951a3d3b1c1005a'), 'wiki.da':\n ('wiki.da.npz', 'd06258014ba2c7450bc2d55edfdf1731433e42e5'), 'wiki.de':\n ('wiki.de.npz', 'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'), 'wiki.diq':\n ('wiki.diq.npz', '4f6c77a86b39834a7130419967759afd8cc26b84'),\n 'wiki.dsb': ('wiki.dsb.npz', 'e74f1d346a8db96987bff0c33ee5f886907c380a'\n ), 'wiki.dv': ('wiki.dv.npz',\n '5d6fe6f0eec2e7704121d5aba03b4edbb28af873'), 'wiki.dz': ('wiki.dz.npz',\n '77c639d36d0355b2de5adead7996eae342b852a6'), 'wiki.ee': ('wiki.ee.npz',\n '4b5a76127d57515d3e8a76787cdefde5856b754a'), 'wiki.el': ('wiki.el.npz',\n 'a00bcb97e7898931196a1c69f7a492e5b6202661'), 'wiki.eml': (\n 'wiki.eml.npz', 'b475d626b3d97e7a68c02827fdc7900599e838c6'), 'wiki.en':\n ('wiki.en.npz', 'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'), 'wiki.eo':\n ('wiki.eo.npz', '18049b0010520d13e676f5a82e8bb90153d99003'), 'wiki.es':\n ('wiki.es.npz', 'a6d192ba7d82d762f8367e75ca951aad4d11e410'), 'wiki.et':\n ('wiki.et.npz', '4beb7025cf88f1aa62d025b187f0cb09aee61858'), 'wiki.eu':\n ('wiki.eu.npz', '5e1a8197e35f20a2476798bbb935b4c131289c4f'), 'wiki.ext':\n ('wiki.ext.npz', '049b2d1b0a8b102b45907cf487cac30aa294e0a0'), 'wiki.fa':\n ('wiki.fa.npz', '81ed274997c87ef87d73d25e166ca06272ce426f'), 'wiki.ff':\n ('wiki.ff.npz', '4867dc74cd53ca0b0f769af4fa1ea420406b59bf'), 'wiki.fi':\n ('wiki.fi.npz', '6d1291b854045179f8171ac7d62ede7d8ac159a2'),\n 'wiki.fiu_vro': ('wiki.fiu_vro.npz',\n 'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'), 'wiki.fj': ('wiki.fj.npz',\n 'cf5c31b0a69276f5dd18ab738ed92444abaeb755'), 'wiki.fo': ('wiki.fo.npz',\n 'ffc19807d528af000861a94cfb8097bd686e14fc'), 'wiki.fr': ('wiki.fr.npz',\n '8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'), 'wiki.frp': (\n 'wiki.frp.npz', 'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'), 'wiki.frr':\n 
('wiki.frr.npz', 'fa5e5c39ea2a45793c679eacea290a35e37405ea'),\n 'wiki.fur': ('wiki.fur.npz', 'a61a8940d059f25000e3fe23933e5ed0d37e65d3'\n ), 'wiki.fy': ('wiki.fy.npz',\n '46f9f41bdf6f4fb8e27a753290413d745465963b'), 'wiki.gag': (\n 'wiki.gag.npz', '49fb01230e6803544122d47ab7d3fe694d1444f2'), 'wiki.gan':\n ('wiki.gan.npz', '716b7b26acc15975f30caf3c6effa111516fcca5'), 'wiki.ga':\n ('wiki.ga.npz', 'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'), 'wiki.gd':\n ('wiki.gd.npz', '597017b5a32d933f194595d3656f858e37e70a62'), 'wiki.glk':\n ('wiki.glk.npz', '91a5834658bc2d48714e8807ef24efb79567b4b5'), 'wiki.gl':\n ('wiki.gl.npz', '2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'), 'wiki.gn':\n ('wiki.gn.npz', 'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'), 'wiki.gom':\n ('wiki.gom.npz', '8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),\n 'wiki.got': ('wiki.got.npz', 'd05daf105611150695e61775fdff2c500b36be3f'\n ), 'wiki.gu': ('wiki.gu.npz',\n '0ce175c5fc39bab4032892f70c9d2bb850af0f4a'), 'wiki.gv': ('wiki.gv.npz',\n '2c573f873d607831ff01b64603c17b8db79bd7e1'), 'wiki.hak': (\n 'wiki.hak.npz', 'e6048727799cdf149f5c50037e0fc59300d33a94'), 'wiki.ha':\n ('wiki.ha.npz', 'f18ea7286bbd390c5470896b2c99cb1adc740064'), 'wiki.haw':\n ('wiki.haw.npz', '18bcd85d2e06b1b889f0835fc5b62697fdf32d72'), 'wiki.he':\n ('wiki.he.npz', '76915ff167b6ecb7b7e22ff0ca46914a55d344af'), 'wiki.hif':\n ('wiki.hif.npz', '12153aaf98d76d5502ab77a27cd0b9a539f61513'), 'wiki.hi':\n ('wiki.hi.npz', '249666a598991f6ec147954c6af9e531fd1cd94e'), 'wiki.ho':\n ('wiki.ho.npz', '3f804fd69780c0789708b56ea9d48715f8e38f26'), 'wiki.hr':\n ('wiki.hr.npz', '9a3de28e69f97048bfb480b4f83eaab6149f66ad'), 'wiki.hsb':\n ('wiki.hsb.npz', '7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'), 'wiki.ht':\n ('wiki.ht.npz', 'a607093d511afeb584d02dc676bc5a27eff66287'), 'wiki.hu':\n ('wiki.hu.npz', '9b2c4750daf1bcf39768572e874b5afda0e2f0bc'), 'wiki.hy':\n ('wiki.hy.npz', 'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'), 'wiki.hz':\n ('wiki.hz.npz', '5dfb8afbdae6b4148c3e55ab459c56a74b46b463'), 'wiki.ia':\n ('wiki.ia.npz', '4cfaaf053b9513bbf5b2423258c0f01d20256de6'), 'wiki.id':\n ('wiki.id.npz', 'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'), 'wiki.ie':\n ('wiki.ie.npz', '1bae7256c2e763ce6d692d1c0a603d99a8b22826'), 'wiki.ig':\n ('wiki.ig.npz', '23128e54a5e143891d392d621723bad9cfc8cf7b'), 'wiki.ii':\n ('wiki.ii.npz', '54bc16d05da512481865a89ecf30260b0acc04dc'), 'wiki.ik':\n ('wiki.ik.npz', 'f8015227e893d2375699b7d132b306ba381f02ac'), 'wiki.ilo':\n ('wiki.ilo.npz', '185a11f81bd5d24a34558dda81ee4735f5ba150b'), 'wiki.io':\n ('wiki.io.npz', 'ddf8180a90aa6ee5be93a2582cc99c535f21363e'), 'wiki.is':\n ('wiki.is.npz', '968f8dd2a093b279a6f7aaa734008454bf51d724'), 'wiki.it':\n ('wiki.it.npz', 'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'), 'wiki.iu':\n ('wiki.iu.npz', 'fa8896730bd6c24c3473daa22116d1016294e7f7'), 'wiki.jam':\n ('wiki.jam.npz', 'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'), 'wiki.ja':\n ('wiki.ja.npz', '8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'), 'wiki.jbo':\n ('wiki.jbo.npz', '145fc999ab004b348cf9bf445f0a93a7a145308b'), 'wiki.jv':\n ('wiki.jv.npz', '66978770bf06e42414395cf5fd8c596044d72bec'), 'wiki.kaa':\n ('wiki.kaa.npz', '624a640ecb9901b2aba2e9f44ab615146ecb2862'),\n 'wiki.kab': ('wiki.kab.npz', 'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'\n ), 'wiki.ka': ('wiki.ka.npz',\n '1ca8376e1e0cbd58001c1b51a2d488a2874a6743'), 'wiki.kbd': (\n 'wiki.kbd.npz', 'f2d2a05b06723ac549784ad5470d84f5742a1352'), 'wiki.kg':\n ('wiki.kg.npz', 'fa7f6d5f660a173a3e75342d449980eedcdc789e'), 'wiki.ki':\n ('wiki.ki.npz', 
'21a8c7c616c0050c51c288861f3423f313e4f634'), 'wiki.kj':\n ('wiki.kj.npz', 'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'), 'wiki.kk':\n ('wiki.kk.npz', 'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'), 'wiki.kl':\n ('wiki.kl.npz', 'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'), 'wiki.km':\n ('wiki.km.npz', 'e053799fd01463808432dc035bef3e36620e2f36'), 'wiki.kn':\n ('wiki.kn.npz', '2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'), 'wiki.koi':\n ('wiki.koi.npz', 'a9b02e9bd41833bcd54769f94626019c03f29997'), 'wiki.ko':\n ('wiki.ko.npz', '764d9896e74b5a26c6884d48bce3bed8ed3a7822'), 'wiki.krc':\n ('wiki.krc.npz', 'bfe39598c718f1cc95909db7544b3214b308a97c'), 'wiki.kr':\n ('wiki.kr.npz', '1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'), 'wiki.ksh':\n ('wiki.ksh.npz', '66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'), 'wiki.ks':\n ('wiki.ks.npz', '85f1adaa05b854df4dede745a1aaab3836e60770'), 'wiki.ku':\n ('wiki.ku.npz', 'faf90584e5a45e6d0f9eeb88399b82abe037d584'), 'wiki.kv':\n ('wiki.kv.npz', '9f2b41822013a412da9c99fac06eed8be03ca192'), 'wiki.kw':\n ('wiki.kw.npz', '3eed8a8fc97a2fc79241b8474a458c98d00fc897'), 'wiki.ky':\n ('wiki.ky.npz', '0116ff90f10a6c0728e1ea86d8a44896ea83270a'), 'wiki.lad':\n ('wiki.lad.npz', '5af2015b3d1c5e8563f0e92721580988ebe2ce50'), 'wiki.la':\n ('wiki.la.npz', '7143303a3ea13c7668eb90ea6e3d2ca69857a3be'), 'wiki.lbe':\n ('wiki.lbe.npz', 'f206a3c35a184ba5d2b32ee68640eadf66c847da'), 'wiki.lb':\n ('wiki.lb.npz', '143dc6337f3690379282034c460c613d7f144923'), 'wiki.lez':\n ('wiki.lez.npz', 'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'), 'wiki.lg':\n ('wiki.lg.npz', '866640ce62cedbc1d453b7ea3c289c291ad76e13'), 'wiki.lij':\n ('wiki.lij.npz', '0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'), 'wiki.li':\n ('wiki.li.npz', '4666b3c238256d7b7623a136db19b8b9f4754734'), 'wiki.lmo':\n ('wiki.lmo.npz', 'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'), 'wiki.ln':\n ('wiki.ln.npz', 'fba158719944aabe58e0002a90be0ed77e11702d'), 'wiki.lo':\n ('wiki.lo.npz', '1e113e340a8a93d385e14502c9c4e3bcdf6c3101'), 'wiki.lrc':\n ('wiki.lrc.npz', '42cb755f398fba6f0da7949c91e92b55654bd482'),\n 'wiki.ltg': ('wiki.ltg.npz', '182f75859e228d1162215f28fe7f2dca127624a4'\n ), 'wiki.lt': ('wiki.lt.npz',\n '66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'), 'wiki.lv': ('wiki.lv.npz',\n '2be8f926da85694fa998bf79d80b61ebb8d67576'), 'wiki.mai': (\n 'wiki.mai.npz', 'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),\n 'wiki.map_bms': ('wiki.map_bms.npz',\n '6f0394d6b3d08a946e3df4b9355efe94148f018a'), 'wiki.mdf': (\n 'wiki.mdf.npz', '774ee35334641db57f9ac9069961c5372a5d92e8'), 'wiki.mg':\n ('wiki.mg.npz', '496c48ef668f08ce95ebb11ce1ce5026b52d935c'), 'wiki.mh':\n ('wiki.mh.npz', '352edd84f99c5aa277a7306f6cacea1fab065ed3'), 'wiki.mhr':\n ('wiki.mhr.npz', 'dd78b27a674ac10411cdf74ac32f9391506b17e0'),\n 'wiki.min': ('wiki.min.npz', '628b406441ab03bc8aa68195ada50bfdc8226f34'\n ), 'wiki.mi': ('wiki.mi.npz',\n '754127b473861cd4f9ae034c9f527a34827b1f00'), 'wiki.mk': ('wiki.mk.npz',\n 'b09fed4f56c296f13c4020ef1fec498382a38b73'), 'wiki.ml': ('wiki.ml.npz',\n '02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'), 'wiki.mn': ('wiki.mn.npz',\n '08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'), 'wiki.mo': ('wiki.mo.npz',\n '638c2e8bd2352fd52921b9ae62f578b8357bab49'), 'wiki.mrj': (\n 'wiki.mrj.npz', 'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'), 'wiki.mr':\n ('wiki.mr.npz', '074dd68c947c2f137a3e84b55012925f00213139'), 'wiki.ms':\n ('wiki.ms.npz', '3dbe9e9d70251de8a374776ff1250a9c3103ee59'), 'wiki.mt':\n ('wiki.mt.npz', 'f5103998a68d1b178387417436a83123d44aba01'),\n 'wiki.multi.ar': ('wiki.multi.ar.npz',\n 
'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'), 'wiki.multi.bg': (\n 'wiki.multi.bg.npz', 'c04018f3a600cee170f12a36cdd35b4727a2aade'),\n 'wiki.multi.ca': ('wiki.multi.ca.npz',\n 'eef52a0cf20c133ca9065de25f0702861a8cfa29'), 'wiki.multi.cs': (\n 'wiki.multi.cs.npz', 'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),\n 'wiki.multi.da': ('wiki.multi.da.npz',\n '24374f2ee169b33327feeee46da31b0de1622fe4'), 'wiki.multi.de': (\n 'wiki.multi.de.npz', '2e6c119b345bebd34b56eaaf855d6703889b11f7'),\n 'wiki.multi.el': ('wiki.multi.el.npz',\n '9d122beedb80a2e5334946641e5bafd32c01e76b'), 'wiki.multi.en': (\n 'wiki.multi.en.npz', '8c3c480b4cb2690304173713a646280613b244a8'),\n 'wiki.multi.es': ('wiki.multi.es.npz',\n '483a22656e4fb2a01e9f4ef8156b261e780850ab'), 'wiki.multi.et': (\n 'wiki.multi.et.npz', '22498c7b91645a3874fa738b5cfb16bf98b6f97c'),\n 'wiki.multi.fi': ('wiki.multi.fi.npz',\n '765a6f0b63777bff4ae6ca2b461c5889c03d6a70'), 'wiki.multi.fr': (\n 'wiki.multi.fr.npz', 'decd9aacf600114b8a36072535c0309874a37c83'),\n 'wiki.multi.he': ('wiki.multi.he.npz',\n '7eee940c1b85936f59122f4b1a166223dd946674'), 'wiki.multi.hr': (\n 'wiki.multi.hr.npz', '1673963416af088f8bf15576afb33d58115db35c'),\n 'wiki.multi.hu': ('wiki.multi.hu.npz',\n 'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'), 'wiki.multi.id': (\n 'wiki.multi.id.npz', '6c3e721febb511ede7db7bf978d65769e4270f5c'),\n 'wiki.multi.it': ('wiki.multi.it.npz',\n 'fc5bfc11e0165e8d95c1708573dad5e456826c73'), 'wiki.multi.mk': (\n 'wiki.multi.mk.npz', '6cd50198355674f156fc863108d9bebf11cfabd9'),\n 'wiki.multi.nl': ('wiki.multi.nl.npz',\n '4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'), 'wiki.multi.no': (\n 'wiki.multi.no.npz', '63756168c1101e73fba8d1a5015f32b8892819e6'),\n 'wiki.multi.pl': ('wiki.multi.pl.npz',\n '958b8e8bead965ba1bb1433e1c960fc3e12a10fb'), 'wiki.multi.pt': (\n 'wiki.multi.pt.npz', '22f07df1609d79b95344ee575ea43141424a1528'),\n 'wiki.multi.ro': ('wiki.multi.ro.npz',\n '73180b3e382519004bf38ea7b86237aacbbe813a'), 'wiki.multi.ru': (\n 'wiki.multi.ru.npz', '3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),\n 'wiki.multi.sk': ('wiki.multi.sk.npz',\n '606a0c3ba9849070c6b6b8c22d920fdeed9a1385'), 'wiki.multi.sl': (\n 'wiki.multi.sl.npz', '3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),\n 'wiki.multi.sv': ('wiki.multi.sv.npz',\n '4f1494885b9a831e87cfa3c15f2204c4a73c0779'), 'wiki.multi.tr': (\n 'wiki.multi.tr.npz', '54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),\n 'wiki.multi.uk': ('wiki.multi.uk.npz',\n '500fd26b1d7a25b42458012e99f9f76642e0c787'), 'wiki.multi.vi': (\n 'wiki.multi.vi.npz', '3955809cceb300965c15f9372221417719bb0db8'),\n 'wiki.mus': ('wiki.mus.npz', 'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'\n ), 'wiki.mwl': ('wiki.mwl.npz',\n '8a5e2c272166f8a72c5694ca6c3104d5f49179ec'), 'wiki.my': ('wiki.my.npz',\n '5e035aca16700d7d6695af8a6d3a88ac847aaeb7'), 'wiki.myv': (\n 'wiki.myv.npz', 'd4cfaab70c640033e02c0fc0c5a3615ae836c569'), 'wiki.mzn':\n ('wiki.mzn.npz', 'ad09ac584ae455b5862b95125ef409360ae18445'),\n 'wiki.nah': ('wiki.nah.npz', '2dc454ef37d059f2053af46cfa1f4f0ca939cba0'\n ), 'wiki.na': ('wiki.na.npz',\n '401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'), 'wiki.nap': (\n 'wiki.nap.npz', '996da46aeeab5644ba766d00c5e343b1553361d7'),\n 'wiki.nds_nl': ('wiki.nds_nl.npz',\n '5a9307e16b13a5a82ec19a52b33254537e7198e7'), 'wiki.nds': (\n 'wiki.nds.npz', 'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'), 'wiki.ne':\n ('wiki.ne.npz', 'a601db2647a74ffd2b4b43dcb8584735f555459c'), 'wiki.new':\n ('wiki.new.npz', 'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),\n 'wiki-news-300d-1M': 
('wiki-news-300d-1M.npz',\n '0a03bbd508e5381e140476140fb121afeb0050ed'),\n 'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',\n '69edae21375407781c727dcb9e534e79d712d137'), 'wiki.ng': ('wiki.ng.npz',\n 'befd774d15f69d43547e13e5ea3a97c4cb1ab405'), 'wiki.nl': ('wiki.nl.npz',\n '5a7cb6f1dd0a7621202abba9461ac2c5bf905219'), 'wiki.nn': ('wiki.nn.npz',\n '8e5059ddeb24050fadaa5cc4622b13feb3e4a226'), 'wiki.no': ('wiki.no.npz',\n '5ce6e0f793e66f081652f64013968099de03d9f9'), 'wiki.nov': (\n 'wiki.nov.npz', '95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'), 'wiki.vec':\n ('wiki.vec.npz', '08ebb912efeb9df1c7d05e1af90484d210dff47e'),\n 'wiki.nrm': ('wiki.nrm.npz', 'e58614b4508ff9810f0b58fd818f973775bc918d'\n ), 'wiki.nso': ('wiki.nso.npz',\n '56a2ebe260241402d117cd89c5c872b9c96ff05b'), 'wiki.nv': ('wiki.nv.npz',\n 'c713051fe03ec1f60314bb42161b2a47fb5e169a'), 'wiki.ny': ('wiki.ny.npz',\n 'ba5a1725955cbc13e7fd93ab499f8085840c992c'), 'wiki.oc': ('wiki.oc.npz',\n '259e7d994c38a4cfc140fb07016b82d6781e5027'), 'wiki.olo': (\n 'wiki.olo.npz', '0fea70f887def4779ee70a79366b88f1ada65004'), 'wiki.om':\n ('wiki.om.npz', '47e2d756b5f8913085d901375c1b4e0b118a4221'), 'wiki.or':\n ('wiki.or.npz', '7e274ab060219b019aa02bb97941cc6e162fd01f'), 'wiki.os':\n ('wiki.os.npz', '19e8199cc2aaffdb07b6c558dbc5465ac6e03155'), 'wiki.pag':\n ('wiki.pag.npz', 'eddf4931547649026c02f893297ef673ec6158bb'),\n 'wiki.pam': ('wiki.pam.npz', '40109aa174bd9f0fa657839bb548e2b0646c58d3'\n ), 'wiki.pa': ('wiki.pa.npz',\n '8a5870717e9e641b1f757f13259171698118de2e'), 'wiki.pap': (\n 'wiki.pap.npz', '999c8e5b005ca20d9998fbbe4fa79177f69e24c0'), 'wiki.pcd':\n ('wiki.pcd.npz', 'e975066b323a65cdc5e4c27138ef674d2cf7250b'),\n 'wiki.pdc': ('wiki.pdc.npz', '5c770b9d56f276b0aa535845f175c05ee1cea615'\n ), 'wiki.pfl': ('wiki.pfl.npz',\n '0063d0b633ee529a75482b36ed4f4da7d64994ec'), 'wiki.pih': (\n 'wiki.pih.npz', 'ce1d76c94d248545eea0d7436c54849dbb380bfc'), 'wiki.pi':\n ('wiki.pi.npz', 'c7d56c334bf529f8b3655693d207a80feaec4aed'), 'wiki.pl':\n ('wiki.pl.npz', '0d612fdf871a1a4084c867f394940475be899443'), 'wiki.pms':\n ('wiki.pms.npz', 'ca149a2fb138011315bb6d5d61c7a5647e515e51'),\n 'wiki.pnb': ('wiki.pnb.npz', '9ec82d02ad8894056c67991cf8ce927bcca74ee2'\n ), 'wiki.pnt': ('wiki.pnt.npz',\n '3f90123407bb8fc838a0a0d3700a14e15f5b26aa'), 'wiki.ps': ('wiki.ps.npz',\n '7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'), 'wiki.pt': ('wiki.pt.npz',\n 'f172fd801edd1ad9d319ba44146d40b5d682a473'), 'wiki.qu': ('wiki.qu.npz',\n '68bec60ccfe1826c3b3a8968574488dbc74cdf7b'), 'wiki.rm': ('wiki.rm.npz',\n '00fb191fc736ba60cb23e76169dfccde9a9daad0'), 'wiki.rmy': (\n 'wiki.rmy.npz', 'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'), 'wiki.rn':\n ('wiki.rn.npz', '57b8e0d6999269be227af6ef2797a9cf8386ff1b'),\n 'wiki.roa_rup': ('wiki.roa_rup.npz',\n 'e06d6b5672a59bb9e83143bc8b28300d23c09546'), 'wiki.roa_tara': (\n 'wiki.roa_tara.npz', 'c083105f40236dc3711f06c1b40e8ee7a714b99d'),\n 'wiki.ro': ('wiki.ro.npz', '766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),\n 'wiki.rue': ('wiki.rue.npz', '9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'\n ), 'wiki.ru': ('wiki.ru.npz',\n 'd59d099481c22d5592ab9635c9ee48060aa0bf45'), 'wiki.rw': ('wiki.rw.npz',\n 'e99ee87d249f6c157c5c97397d1025d798b85c69'), 'wiki.sah': (\n 'wiki.sah.npz', '85dae39097b29bc8e2b64f343a77794e4a62f91a'), 'wiki.sa':\n ('wiki.sa.npz', '7d1928d7c67400045ac1b35a37a0e3089690d875'), 'wiki.scn':\n ('wiki.scn.npz', '27d7b8050bbeed8ce196061c610216760b053c39'), 'wiki.sc':\n ('wiki.sc.npz', '69c7b8be0f03a1bbd615695f93bdd78f96a58e16'), 'wiki.sco':\n 
('wiki.sco.npz', '4880282f59d3338b67fbff75359e2d24896e95bb'), 'wiki.sd':\n ('wiki.sd.npz', '0ed8da4d27223db717a612cf0c88582351db6e19'), 'wiki.se':\n ('wiki.se.npz', '0f4b2e060d5e29f96ca73aab29c967e79db69c17'), 'wiki.sg':\n ('wiki.sg.npz', 'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'), 'wiki.sh':\n ('wiki.sh.npz', 'c13f1e94676bc939560193f7aa7ffd7d604707b3'),\n 'wiki.simple': ('wiki.simple.npz',\n '352d0575e7d60b08e1dfce2c5de713906f0ed78f'), 'wiki.si': ('wiki.si.npz',\n '204f9ffbe7770a9f56d3b2fb26999165015f5c33'), 'wiki.sk': ('wiki.sk.npz',\n '7a9820b5a343b242660bf2595d1ecbf6e00a76d6'), 'wiki.sl': ('wiki.sl.npz',\n '85f3186f26d6725317a64e290363a7251b928b81'), 'wiki.sm': ('wiki.sm.npz',\n '9e13452cc4bff677f4f15db04f9d2f95f6ec054c'), 'wiki.sn': ('wiki.sn.npz',\n 'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'), 'wiki.so': ('wiki.so.npz',\n '0f5d71b95768b33fd939a870c15344c4478364a9'), 'wiki.sq': ('wiki.sq.npz',\n '8b05826df8575e65c87a2fc0b7630cf644d4216d'), 'wiki.srn': (\n 'wiki.srn.npz', '2711396ef297ac5dde8904508bc002bdecbcc6f4'), 'wiki.sr':\n ('wiki.sr.npz', '546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'), 'wiki.ss':\n ('wiki.ss.npz', '2e5911bad79bb5270a64f587e326d31c95ec58f3'), 'wiki.st':\n ('wiki.st.npz', '23bc954719a2962e891f02efaea754c9ea025894'), 'wiki.stq':\n ('wiki.stq.npz', 'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'), 'wiki.su':\n ('wiki.su.npz', '7e48732e8a1fcf212e692924a4416a6ac3b3b055'), 'wiki.sv':\n ('wiki.sv.npz', 'b9ec52e9423688f195f3145c243226c0e0b51e83'), 'wiki.sw':\n ('wiki.sw.npz', '5262f0c645322b10eca73f792a970f10b2719e55'), 'wiki.szl':\n ('wiki.szl.npz', 'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'), 'wiki.ta':\n ('wiki.ta.npz', 'da7c5bc6e1142306ff2669bf1739832beb6c1763'), 'wiki.tcy':\n ('wiki.tcy.npz', 'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'), 'wiki.te':\n ('wiki.te.npz', 'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'), 'wiki.tet':\n ('wiki.tet.npz', '11e46a893af55344dbe102d530fdfea5d949d3bc'), 'wiki.tg':\n ('wiki.tg.npz', 'da66abb72ec9ccc602713161e544963d59cc51d7'), 'wiki.th':\n ('wiki.th.npz', '25e54bf2d305779ec9baa5f344410bd75c7702fc'), 'wiki.ti':\n ('wiki.ti.npz', '1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'), 'wiki.tk':\n ('wiki.tk.npz', '34c714fa8275fd6abfe86b2d144a043774552a6c'), 'wiki.tl':\n ('wiki.tl.npz', '7d7f8a0485155bce7a74a1d778824375b0029f53'), 'wiki.tn':\n ('wiki.tn.npz', 'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'), 'wiki.to':\n ('wiki.to.npz', 'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'), 'wiki.tpi':\n ('wiki.tpi.npz', '448cef043fa4b7f97825dbf8ee205ef05543bcac'), 'wiki.tr':\n ('wiki.tr.npz', 'c9830607a4c5134c6191006f1d80bae0ec798fe6'), 'wiki.ts':\n ('wiki.ts.npz', '84a0598803712c8a713943447ddb73fc0f39af43'), 'wiki.tt':\n ('wiki.tt.npz', '82c29df18f33e6284af3e977a6dda7e132a7a225'), 'wiki.tum':\n ('wiki.tum.npz', '358990b894a3fb09d70674465952d828c9b0eda7'), 'wiki.tw':\n ('wiki.tw.npz', '1e6d2838a4f271c1808795fb929cfcbf95094d93'), 'wiki.ty':\n ('wiki.ty.npz', 'e41ca5192d8cb515b3561c8d6935b150deb027b7'), 'wiki.tyv':\n ('wiki.tyv.npz', 'ce062ed32e854604714b65698ae290c99ba28060'),\n 'wiki.udm': ('wiki.udm.npz', '9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'\n ), 'wiki.ug': ('wiki.ug.npz',\n '656503e54063e200980e39f00fc011395bcd8551'), 'wiki.uk': ('wiki.uk.npz',\n '352b7ee24d9fc6513fff4fe13bc04086c680834a'), 'wiki.ur': ('wiki.ur.npz',\n 'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'), 'wiki.uz': ('wiki.uz.npz',\n 'd60d1e67bb8574dd71c18c88114aba674fc1eecb'), 'wiki.ve': ('wiki.ve.npz',\n '5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'), 'wiki.vep': (\n 'wiki.vep.npz', 
'7a94355754fbe56802242c0bf9d7a27335095552'), 'wiki.vi':\n ('wiki.vi.npz', 'f118039eb16a4ca3347b6b171eac41113350a041'), 'wiki.vls':\n ('wiki.vls.npz', '9a46a2fdc6448aa54f212081643745499ea7d05c'), 'wiki.vo':\n ('wiki.vo.npz', '8e2f93c85ac608bcc4ae14093b9ff016061378fb'), 'wiki.wa':\n ('wiki.wa.npz', '907074f7743d30cdbb2c48d0c8b4040796ea4164'), 'wiki.war':\n ('wiki.war.npz', '928fb410c394b9c18d875326b6a3e750e2611e1b'), 'wiki.wo':\n ('wiki.wo.npz', '7bb352be44f7261aa926f49b13e77df30f29312f'), 'wiki.wuu':\n ('wiki.wuu.npz', '0d1dc7b05867ff2156a1180ad3da3b4697924e59'),\n 'wiki.xal': ('wiki.xal.npz', 'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'\n ), 'wiki.xh': ('wiki.xh.npz',\n 'c64e1d2e77d1c744a628e2bd7353284616e48bea'), 'wiki.xmf': (\n 'wiki.xmf.npz', '160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'), 'wiki.yi':\n ('wiki.yi.npz', '0662542cee29f3392fc905004ac6443b32c1477c'), 'wiki.yo':\n ('wiki.yo.npz', '5d12d3b902a1fa19d8548295c3802c0608afa5c8'), 'wiki.za':\n ('wiki.za.npz', '536348ff89df62e968739b567a1245bfd4112fbe'), 'wiki.zea':\n ('wiki.zea.npz', '61fa192289a7c0f73ffa8035632a38b91c31c224'),\n 'wiki.zh_classical': ('wiki.zh_classical.npz',\n '9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'), 'wiki.zh_min_nan': (\n 'wiki.zh_min_nan.npz', '5d38bc025c82af578299d60f7df7b399de6ed81a'),\n 'wiki.zh': ('wiki.zh.npz', '94007fcf3b105bf2c21b84a3a22bdb7946e74804'),\n 'wiki.zh_yue': ('wiki.zh_yue.npz',\n 'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'), 'wiki.zu': ('wiki.zu.npz',\n 'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}\nGOOGLEANALOGY_CATEGORIES = ['capital-common-countries', 'capital-world',\n 'currency', 'city-in-state', 'family', 'gram1-adjective-to-adverb',\n 'gram2-opposite', 'gram3-comparative', 'gram4-superlative',\n 'gram5-present-participle', 'gram6-nationality-adjective',\n 'gram7-past-tense', 'gram8-plural', 'gram9-plural-verbs']\nBATS_CHECKSUMS = {\n 'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':\n 'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',\n 'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':\n '44dbc56432b79ff5ce2ef80b6840a8aa916524f9',\n 'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':\n 'dc530918e98b467b8102a7dab772a66d3db32a73',\n 'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':\n '6c6fdfb6c733bc9b298d95013765163f42faf6fb',\n 'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':\n '39fa47ec7238ddb3f9818bc586f23f55b55418d8',\n 'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':\n '8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',\n 'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':\n 'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',\n 'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':\n '5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',\n 'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':\n '377777c1e793c638e72c010228156d01f916708e',\n 'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':\n '051c0c3c633e10900f827991dac14cf76da7f022',\n 'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':\n '5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',\n 'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':\n '80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',\n 'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':\n '223e120bd61b3116298a253f392654c15ad5a39a',\n 'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':\n 'a56f8685af489bcd09c36f864eba1657ce0a7c28',\n 'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':\n '5da99b1f1781ecfb4a1a7448c715abf07451917b',\n 
'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':\n '4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',\n 'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':\n 'a6218162bc257d98e875fc667c23edfac59e19fd',\n 'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':\n '9a4236c3bbc23903e101a42fb5ad6e15e552fadf',\n 'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':\n '3ab0153926d5cf890cf08a4077da6d9946133874',\n 'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':\n '2a012b87a9a60e128e064c5fe24b60f99e16ddce',\n 'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':\n '9890315d3c4e6a38b8ae5fc441858564be3d3dc4',\n 'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':\n 'ef08a00e8ff7802811ace8f00fabac41b5d03678',\n 'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':\n '754957101c93a25b438785bd4458404cd9010259',\n 'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':\n '71a6562c34fb6154992a7c3e499375fcc3529c96',\n 'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':\n 'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',\n 'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':\n '12d5b51c7b76b9136eadc719abc8cf4806c67b73',\n 'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':\n '91991b007a35f45bd42bd7d0d465c6f8311df911',\n 'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':\n 'e5af11e216db392986ba0cbb597d861066c29adb',\n 'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':\n 'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',\n 'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':\n '247a588671bc1da8f615e14076bd42573d24b4b3',\n 'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':\n '4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',\n 'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':\n '83d5ecad78d9de28fd70347731c7ee5918ba43c9',\n 'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - misc].txt':\n 'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',\n 'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':\n 'c081e1104e1b40725063f4b39d13d1ec12496bfd',\n 'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':\n 'bcbf05f3be76cef990a74674a9999a0bb9790a07',\n 'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':\n '2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',\n 'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':\n '8fa287860b096bef004fe0f6557e4f686e3da81a',\n 'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':\n 'a17c591961bddefd97ae5df71f9d1559ce7900f4',\n 'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':\n '117fbb86504c192b33a5469f2f282e741d9c016d',\n 'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':\n '3cde2f2c2a0606777b8d7d11d099f316416a7224'}\nBATS_CATEGORIES = {'I01': '[noun - plural_reg]', 'I02':\n '[noun - plural_irreg]', 'I03': '[adj - comparative]', 'I04':\n '[adj - superlative]', 'I05': '[verb_inf - 3pSg]', 'I06':\n '[verb_inf - Ving]', 'I07': '[verb_inf - Ved]', 'I08':\n '[verb_Ving - 3pSg]', 'I09': '[verb_Ving - Ved]', 'I10':\n '[verb_3pSg - Ved]', 'D01': '[noun+less_reg]', 'D02': '[un+adj_reg]',\n 'D03': '[adj+ly_reg]', 'D04': '[over+adj_reg]', 'D05': '[adj+ness_reg]',\n 'D06': '[re+verb_reg]', 'D07': '[verb+able_reg]', 'D08':\n '[verb+er_irreg]', 'D09': '[verb+tion_irreg]', 'D10':\n '[verb+ment_irreg]', 'E01': '[country - capital]', 'E02':\n '[country - language]', 'E03': '[UK_city - county]', 'E04':\n '[name - nationality]', 'E05': '[name - occupation]', 
'E06':\n '[animal - young]', 'E07': '[animal - sound]', 'E08':\n '[animal - shelter]', 'E09': '[things - color]', 'E10':\n '[male - female]', 'L01': '[hypernyms - animals]', 'L02':\n '[hypernyms - misc]', 'L03': '[hyponyms - misc]', 'L04':\n '[meronyms - substance]', 'L05': '[meronyms - member]', 'L06':\n '[meronyms - part]', 'L07': '[synonyms - intensity]', 'L08':\n '[synonyms - exact]', 'L09': '[antonyms - gradable]', 'L10':\n '[antonyms - binary]'}\nSEMEVAL17_CHECKSUMS = {'SemEval17-Task2/README.txt':\n 'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',\n 'SemEval17-Task2/task2-scorer.jar':\n '145ef73ce955656d59e3b67b41f8152e8ee018d8',\n 'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':\n '6fc840f989d2274509549e472a68fb88dd2e149f',\n 'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':\n '05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',\n 'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':\n '552904b5988f9951311290ca8fa0441dd4351d4b',\n 'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':\n '29d5970feac5982961bd6ab621ba31f83d3bff77',\n 'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':\n 'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':\n 'c51463460495a242cc726d41713c5e00b66fdd18',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':\n '2d2bb2ed41308cc60e7953cc9036f7dc89141b48',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':\n 'a5842ff17fe3847d15414924826a8eb236018bcc',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':\n '717bbe035d8ae2bad59416eb3dd4feb7238b97d4',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':\n 'a342b950109c73afdc86a7829e17c1d8f7c482f0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':\n 'ef92b1375762f68c700e050d214d3241ccde2319',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':\n '17aa103981f3193960309bb9b4cc151acaf8136c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':\n 'eced15e8565689dd67605a82a782d19ee846222a',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':\n '5cb69370a46385a7a3d37cdf2018744be77203a0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':\n '402f7fed52b60e915fb1be49f935395488cf7a7b',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':\n '9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':\n 'd3b37aac79ca10311352309ef9b172f686ecbb80',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':\n 'a2959aec346c26475a4a6ad4d950ee0545f2381e',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':\n 'ca627c30143d9f82a37a8776fabf2cee226dd35c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':\n 'a03d79a6ce7b798356b53b4e85dbe828247b97ef',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':\n '7564130011d38daad582b83135010a2a58796df6',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':\n 'c9e23c2e5e970e7f95550fbac3362d85b82cc569',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':\n 'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':\n '428dfdad2a144642c13c24b845e6b7de6bf5f663',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':\n 
'1dd7ab08a10552486299151cdd32ed19b56db682',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':\n '17451ac2165aa9b695dae9b1aba20eb8609fb400',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':\n '5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':\n '8c09a219670dc32ab3864078bf0c28a287accabc',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':\n 'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':\n 'e0b560bb1d2db39ce45e841c8aad611734dc94f1',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':\n 'dd071fd90f59bec8d271a447d86ee2e462941f52',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':\n 'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':\n '8956c78ff9ceae1d923a57816e55392c6a7dfc49',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':\n '2f7c4247cde0d918b3508e90f6b49a1f5031c81b',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':\n 'c11e0b5b55f94fc97c7b11fa455e71b071be879f',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':\n 'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':\n '693cb5928e807c79e39136dc0981dadca7832ae6',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':\n '8241ca66bf5ba55f77607e9bcfae8e34902715d8',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':\n 'd30701a93c8c5500b82ac2334ed8410f9a23864b',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':\n 'bad225573e1216ba8b35429e9fa520a20e8ce031',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt'\n : 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':\n 'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':\n '88a6f6dd1bba309f7cae7281405e37f442782983',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':\n 'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':\n '128d1a460fe9836b66f0fcdf59455b02edb9f258',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':\n '508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':\n '1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':\n '141c83d591b0292016583d9c23a2cc5514a006aa',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':\n 'a0a548cd698c389ee80c34d6ec72abed5f1625e5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':\n '8d42bed8a43ff93d26ca95794758d9392ca707ed',\n 
'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':\n '9c85223f1f734de61c28157df0ce417bb0537803',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':\n '126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':\n '1db6201c2c8f19744c39dbde8bd4a803859d64c1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':\n '5300bf2ead163ff3981fb41ec5d0e291c287c9e0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':\n 'd4f5205de929bb0c4020e1502a3f2204b5accd51',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':\n '3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':\n 'c14de7bf326907336a02d499c9b92ab229f3f4f8',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':\n '3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':\n '359f69e9dfd6411a936baa3392b8f05c398a7707',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':\n '44090607fabe5a26926a384e521ef1317f6f00d0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':\n '97b09ffa11803023c2143fd4a4ac4bbc9775e645',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt'\n : 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt'\n : 'a0735361a692be357963959728dacef85ea08240'}\nUD21_DATA_FILE_SHA1 = {'af': {'dev': ('af-ud-dev.conllu',\n 'e37b104f4425ee00afc81779201816d5ac525194'), 'test': (\n 'af-ud-test.conllu', 'd2bf02370d308ee957c04242bd0871db0e488389'),\n 'train': ('af-ud-train.conllu',\n 'a652c7b19c236063d3ea489947f83095893b699a')}, 'grc_proiel': {'dev': (\n 'grc_proiel-ud-dev.conllu', 'd199530c7e40ff0214e510957bb126af0dc12c1c'),\n 'test': ('grc_proiel-ud-test.conllu',\n 'bb7825ddeb18fc2d86638e4725f04563f3e08aab'), 'train': (\n 'grc_proiel-ud-train.conllu',\n 'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')}, 'grc': {'dev': (\n 'grc-ud-dev.conllu', 'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),\n 'test': ('grc-ud-test.conllu',\n 'f19accf31db95e2c736d716d3438c09aa877eb07'), 'train': (\n 'grc-ud-train.conllu', 'e98d3eabea67787c5d43a498f5a0fa4246f38104')},\n 'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',\n 'b740de9bd68e68b30b9b313eb050d44e94470ca5'), 'test': 
(\n 'ar_nyuad-ud-test.conllu', 'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),\n 'train': ('ar_nyuad-ud-train.conllu',\n 'd065f03958fd8782a7431b6778c6665ad09444a6')}, 'ar_pud': {'test': (\n 'ar_pud-ud-test.conllu', '2161701e6726b6feb14733a312fba6160b9eb722')},\n 'ar': {'dev': ('ar-ud-dev.conllu',\n '5f8964974d5ba5eb3504cdafb93c34c473c4177c'), 'test': (\n 'ar-ud-test.conllu', '58df161047f310cc3bb4d0e615ca33466e630bb9'),\n 'train': ('ar-ud-train.conllu',\n '0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')}, 'eu': {'dev': (\n 'eu-ud-dev.conllu', '3ee15b5ed46ec93d7278c8cc0351d242417d553d'), 'test':\n ('eu-ud-test.conllu', 'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),\n 'train': ('eu-ud-train.conllu',\n 'd56ec997916e38ee6ab1badd78c119e81e4797c9')}, 'be': {'dev': (\n 'be-ud-dev.conllu', '015473e91cf8937c46e8b721f206415abac16a35'), 'test':\n ('be-ud-test.conllu', 'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),\n 'train': ('be-ud-train.conllu',\n '26b871e28d2f356a709f106b6e3e86b417ba74e7')}, 'bg': {'dev': (\n 'bg-ud-dev.conllu', '0a2284b10547681eb65691eb2a9f0f1662e16e90'), 'test':\n ('bg-ud-test.conllu', '75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),\n 'train': ('bg-ud-train.conllu',\n 'd4b2fa267010c4486885c91f3af65ff66c8be94c')}, 'bxr': {'sample': (\n 'bxr-ud-sample.conllu', '9239bdd251a60820c71111ec54de9e7d58a8579d'),\n 'test': ('bxr-ud-test.conllu',\n '0a06e527454ae0b547153222f67eb5db94e528fd')}, 'yue': {'test': (\n 'yue-ud-test.conllu', 'd91477c65aa75cd45489cca13f7a122066972bdb')},\n 'ca': {'dev': ('ca-ud-dev.conllu',\n '5737824f0afff0d07a43db331f102d62c6da2d96'), 'test': (\n 'ca-ud-test.conllu', '0e28bd2a3b982515c1158194ad52bcbbe741e170'),\n 'train': ('ca-ud-train.conllu',\n 'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')}, 'zh_cfl': {'test': (\n 'zh_cfl-ud-test.conllu', '32fe45cd0e4e11ced95202971bce74acbc6a8c30')},\n 'zh_hk': {'test': ('zh_hk-ud-test.conllu',\n '4c75fa5bbcdcb181447b4e037224d50feb2776fb')}, 'zh_pud': {'test': (\n 'zh_pud-ud-test.conllu', 'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},\n 'zh': {'dev': ('zh-ud-dev.conllu',\n '34d8253b35ad2245d59ddffa71b5689ef267b6b2'), 'test': (\n 'zh-ud-test.conllu', '0f00516097650c12262298dd0fbd1b17a6d2bfe2'),\n 'train': ('zh-ud-train.conllu',\n '9444eec5f4561f289ad140e47e49013689512a65')}, 'cop': {'dev': (\n 'cop-ud-dev.conllu', '863d1004df1a92df52515105f6fae6ff68539595'),\n 'test': ('cop-ud-test.conllu',\n 'd3b33566679f071d4ad622ad840cd98381835706'), 'train': (\n 'cop-ud-train.conllu', '33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},\n 'hr': {'dev': ('hr-ud-dev.conllu',\n '8da2a419980807d2e91e09b6bf496e58d442b0ba'), 'test': (\n 'hr-ud-test.conllu', '49d673cba3d32d39d413e557276a45a0214ed83e'),\n 'train': ('hr-ud-train.conllu',\n 'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')}, 'cs_cac': {'dev': (\n 'cs_cac-ud-dev.conllu', '69dfed28c29146b41a3428f4715bde70a6aecf00'),\n 'test': ('cs_cac-ud-test.conllu',\n 'a994b33ebbde486c1818a9df460fb112055e95de'), 'train': (\n 'cs_cac-ud-train.conllu', '694f8559471dc481612606bf5df078daa094a84e')},\n 'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',\n 'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'), 'test': (\n 'cs_cltt-ud-test.conllu', 'a8f6696785e658471f759bc736b738a105cba9a3'),\n 'train': ('cs_cltt-ud-train.conllu',\n 'ab97886066bfa462e5da03d25f802489292c0b56')}, 'cs_fictree': {'dev': (\n 'cs_fictree-ud-dev.conllu', 'dc67c07737a3a8bf2633068941f2d55f1500e192'),\n 'test': ('cs_fictree-ud-test.conllu',\n '06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'), 'train': (\n 'cs_fictree-ud-train.conllu',\n 'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')}, 
'cs_pud': {'test': (\n 'cs_pud-ud-test.conllu', '9f205677041de694157ba2ef3e1eadb44d467f2f')},\n 'cs': {'dev': ('cs-ud-dev.conllu',\n 'd609e895b21b8710337e23a98b58ffd7b7a54bf1'), 'test': (\n 'cs-ud-test.conllu', '34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),\n 'train': ('cs-ud-train.conllu',\n 'd1f855798a29d433b580d01ade0d8d062cd58534')}, 'da': {'dev': (\n 'da-ud-dev.conllu', '2c0c798c20a2efb30273172d388342a82bb0ce3c'), 'test':\n ('da-ud-test.conllu', '85a95a8527f8773f1575ceaf0ab51f204b211047'),\n 'train': ('da-ud-train.conllu',\n 'b653c029a7ae5c106f865dcef949fb3fe2aa0420')}, 'nl_lassysmall': {'dev':\n ('nl_lassysmall-ud-dev.conllu',\n '2a169af74c2206c9073c3932b4a300492a314ee5'), 'test': (\n 'nl_lassysmall-ud-test.conllu',\n '39f08896a40ad370f2acc37d58689cdc43a660a9'), 'train': (\n 'nl_lassysmall-ud-train.conllu',\n 'e4fd6bac246c81bb17a3c932e251b8662739cc19')}, 'nl': {'dev': (\n 'nl-ud-dev.conllu', '33a9387eef9f5c0b15bd1e76e78776863f1f6d90'), 'test':\n ('nl-ud-test.conllu', '01b3e1048792c851fdd59882c353fcdb76dc165e'),\n 'train': ('nl-ud-train.conllu',\n '8e6a10152b7d09ce61433dd5f715ab2401611cf6')}, 'en_lines': {'dev': (\n 'en_lines-ud-dev.conllu', '83b63b7670ea4394b558bc26e16a004339f0a0ef'),\n 'test': ('en_lines-ud-test.conllu',\n 'ccc9d3c71a873313d138c3adb12405a97eb270d8'), 'train': (\n 'en_lines-ud-train.conllu', 'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')\n }, 'en_pud': {'test': ('en_pud-ud-test.conllu',\n '4a9c83ba058a7e51979af790ba0440cc274b948f')}, 'en_partut': {'dev': (\n 'en_partut-ud-dev.conllu', '863a6f571158acaaca95223e50bd08fc0c1134f0'),\n 'test': ('en_partut-ud-test.conllu',\n '0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'), 'train': (\n 'en_partut-ud-train.conllu', 'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18'\n )}, 'en': {'dev': ('en-ud-dev.conllu',\n 'e2159dda4400d289ad8a403b466c8d23d733ba35'), 'test': (\n 'en-ud-test.conllu', 'bd36ef23f76155625b379d063427bd62f19b7658'),\n 'train': ('en-ud-train.conllu',\n '993c44f62104971fe2d056847349facbb7986258')}, 'et': {'dev': (\n 'et-ud-dev.conllu', '312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'), 'test':\n ('et-ud-test.conllu', 'd70907f0771b41a27406672b9d91043a0954f946'),\n 'train': ('et-ud-train.conllu',\n 'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')}, 'fi_ftb': {'dev': (\n 'fi_ftb-ud-dev.conllu', '552ec574acdb3209e7545af4e16a43a1e2956979'),\n 'test': ('fi_ftb-ud-test.conllu',\n '13c34838a0fa9e379f9624ed1f4c368ca50a7d98'), 'train': (\n 'fi_ftb-ud-train.conllu', '73d025250bfc82a24181b5ed601dc4ae7c8e846c')},\n 'fi_pud': {'test': ('fi_pud-ud-test.conllu',\n '4ab7b0d99ce6697d79732e401be97585a28c2afa')}, 'fi': {'dev': (\n 'fi-ud-dev.conllu', 'e023cf7eaffbda20bd4518d87fe9086207bb5361'), 'test':\n ('fi-ud-test.conllu', 'fd57c5106e43994250f4472890572bdbb8b4a48b'),\n 'train': ('fi-ud-train.conllu',\n 'ab27bda8cbb62886196b78de87985a4c6cf8215d')}, 'fr_ftb': {'dev': (\n 'fr_ftb-ud-dev.conllu', '71b3cc02601f64711f98e33a6b2af10aa00700be'),\n 'test': ('fr_ftb-ud-test.conllu',\n '723b8c44e74202a18b7e71268b738a5e1aa15f86'), 'train': (\n 'fr_ftb-ud-train.conllu', '9a347120478254647deb7c7e02871b28aad23ec4')},\n 'fr_pud': {'test': ('fr_pud-ud-test.conllu',\n '570b7e31dc359ed62123bea6546efa13cfc2cf25')}, 'fr_partut': {'dev': (\n 'fr_partut-ud-dev.conllu', '1505030048829a8dccc466cc86bca057996301ae'),\n 'test': ('fr_partut-ud-test.conllu',\n 'f6446317c9f82cc0b70a76be75282804a3359ac0'), 'train': (\n 'fr_partut-ud-train.conllu', 'f87c246cfa91186b90c7780cb64783034f196622'\n )}, 'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',\n 
'859b10d80c7b3a382571cce9b2620039673539d1'), 'test': (\n 'fr_sequoia-ud-test.conllu', 'be0ef69e392e64030414748da2995433f23e033d'\n ), 'train': ('fr_sequoia-ud-train.conllu',\n '48ac01913518888a32670a687123ed1bac57e0e9')}, 'fr': {'dev': (\n 'fr-ud-dev.conllu', '5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'), 'test':\n ('fr-ud-test.conllu', 'd20a014acd38193155a33a5233c13f89541c78c3'),\n 'train': ('fr-ud-train.conllu',\n 'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')}, 'gl_treegal': {'dev': (\n 'gl_treegal-ud-dev.conllu', '272558614cff4a5e1f2805626904e6dc488b8d25'),\n 'test': ('gl_treegal-ud-test.conllu',\n '18d99474d3aa9c83878c42a79d7881330dd9b861'), 'train': (\n 'gl_treegal-ud-train.conllu',\n 'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')}, 'gl': {'dev': (\n 'gl-ud-dev.conllu', 'e72390dce9bf973442deef31ed0cd7a975361fe5'), 'test':\n ('gl-ud-test.conllu', '7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),\n 'train': ('gl-ud-train.conllu',\n 'd586e7bffa314f8c5b85288e060e68dddc1f5d33')}, 'de_pud': {'test': (\n 'de_pud-ud-test.conllu', '2c91e42b7345145290b68385ff5270910048b8c4')},\n 'de': {'dev': ('de-ud-dev.conllu',\n '9b4f49bfa2b609d54369890d9e7d8d24a3c229af'), 'test': (\n 'de-ud-test.conllu', '48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),\n 'train': ('de-ud-train.conllu',\n '04a1d6a6a2da9d9c38496118e0432c9a6720db64')}, 'got': {'dev': (\n 'got-ud-dev.conllu', '501c47193ca2af5826e4afcc04941df87a7c47c3'),\n 'test': ('got-ud-test.conllu',\n 'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'), 'train': (\n 'got-ud-train.conllu', 'b4951ede89d947c6617df782ac248566235f78fb')},\n 'el': {'dev': ('el-ud-dev.conllu',\n '9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'), 'test': (\n 'el-ud-test.conllu', '1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),\n 'train': ('el-ud-train.conllu',\n '32f4abc821624c4cd4d3b3b555c1558f06366e2c')}, 'he': {'dev': (\n 'he-ud-dev.conllu', 'c5b76874fcf11c7733e1555957bb49e8298af140'), 'test':\n ('he-ud-test.conllu', '4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),\n 'train': ('he-ud-train.conllu',\n 'eae49a515b38d224b109138bf006a112e80a7caf')}, 'hi_pud': {'test': (\n 'hi_pud-ud-test.conllu', 'd237fecc594186e7a52ad33313ac52e927905d73')},\n 'hi': {'dev': ('hi-ud-dev.conllu',\n '48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'), 'test': (\n 'hi-ud-test.conllu', '004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),\n 'train': ('hi-ud-train.conllu',\n '9be8afb2cabda361817c55b3de6ebba2c3fef7e0')}, 'hu': {'dev': (\n 'hu-ud-dev.conllu', 'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'), 'test':\n ('hu-ud-test.conllu', 'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),\n 'train': ('hu-ud-train.conllu',\n 'e5486523a8bebe40d633ad8b4050be8a3d11c78a')}, 'id': {'dev': (\n 'id-ud-dev.conllu', '7b181aa954a4f4b22b80a18e4f67cbf423e9c701'), 'test':\n ('id-ud-test.conllu', '357ed8c216725760bf5be561ed6e918ce602b5ac'),\n 'train': ('id-ud-train.conllu',\n '328ea588b75de55ef48373c2bf9983bca277d724')}, 'ga': {'dev': (\n 'ga-ud-dev.conllu', '180a1a9dcfcec6528a559032c67e9a15693a039d'), 'test':\n ('ga-ud-test.conllu', 'b74a56372af3f68f089ea82ba858e5a82aae4e22'),\n 'train': ('ga-ud-train.conllu',\n '40df0b12fbadae6e56c0a01da483d6c612d9450c')}, 'it_pud': {'test': (\n 'it_pud-ud-test.conllu', 'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},\n 'it_partut': {'dev': ('it_partut-ud-dev.conllu',\n '0bb5dc0c0815212c9832eaef3b802cf885e0543b'), 'test': (\n 'it_partut-ud-test.conllu', 'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),\n 'train': ('it_partut-ud-train.conllu',\n '784b18bf8d3b59d967d147075a3cb5b03fb28637')}, 'it_postwita': {'dev': (\n 'it_postwita-ud-dev.conllu', 
'07f6f658246aa070e2166e688f7569d61aafff54'\n ), 'test': ('it_postwita-ud-test.conllu',\n 'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'), 'train': (\n 'it_postwita-ud-train.conllu',\n '69684c47fba99230f6ef1a204b95c37d28eaa5a6')}, 'it': {'dev': (\n 'it-ud-dev.conllu', 'ea8fd59f36280fbd77b9a807959491636048a698'), 'test':\n ('it-ud-test.conllu', '34839fdeeef883f8034c723a18772947106cec6b'),\n 'train': ('it-ud-train.conllu',\n 'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')}, 'ja_pud': {'test': (\n 'ja_pud-ud-test.conllu', '4c914016a0968ca434348370d38c9579a60e8fd7')},\n 'ja': {'dev': ('ja-ud-dev.conllu',\n '21f06fef7fbeccd05a298385bf40f8b4ffe95146'), 'test': (\n 'ja-ud-test.conllu', '240d3532698356a7c6f93c3215718ef2f66a672f'),\n 'train': ('ja-ud-train.conllu',\n '35eaf307d94c2006241fe08f745d7b1b17f049cf')}, 'kk': {'dev': (\n 'kk-ud-dev.conllu', '038033c822b407040a4ecb87c077506cd0d1a322'), 'test':\n ('kk-ud-test.conllu', '4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),\n 'train': ('kk-ud-train.conllu',\n '48d664d273ad6731cb65228ce9b57ad3cf50f7f5')}, 'ko': {'dev': (\n 'ko-ud-dev.conllu', '60e7da7cca44c923873a062e80262726659f5528'), 'test':\n ('ko-ud-test.conllu', 'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),\n 'train': ('ko-ud-train.conllu',\n 'ee21328f9ea39668e802f0cb6a794358f5c256bf')}, 'kmr': {'sample': (\n 'kmr-ud-sample.conllu', 'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),\n 'test': ('kmr-ud-test.conllu',\n '606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')}, 'la_ittb': {'dev': (\n 'la_ittb-ud-dev.conllu', 'd9f17992bd0258a734aea9b6c53759039717c86a'),\n 'test': ('la_ittb-ud-test.conllu',\n 'f4d097d076083240c48594d4cb058840ff16be8e'), 'train': (\n 'la_ittb-ud-train.conllu', '627d5b30b20655efab194c75fc9219b0aa2cf4b6')},\n 'la_proiel': {'dev': ('la_proiel-ud-dev.conllu',\n '9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'), 'test': (\n 'la_proiel-ud-test.conllu', '697dbeae38507856a4fafa8506dfc8db5e8e4054'),\n 'train': ('la_proiel-ud-train.conllu',\n '5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')}, 'la': {'dev': (\n 'la-ud-dev.conllu', '2748bb0479cb599e1a007d1d1634d5870b45549b'), 'test':\n ('la-ud-test.conllu', '19c62c64ce41a650e9b55a345c61e7c0d994816e'),\n 'train': ('la-ud-train.conllu',\n '183ce6f58b0305e5926161e29b9a6aacc424662c')}, 'lv': {'dev': (\n 'lv-ud-dev.conllu', '6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'), 'test':\n ('lv-ud-test.conllu', '9f7806a24656db0e859efe041a88926b220b8e28'),\n 'train': ('lv-ud-train.conllu',\n 'f1eeff608e8f27d92b683ae041591355198841eb')}, 'lt': {'dev': (\n 'lt-ud-dev.conllu', '0b8dc19005571fa7b66d8302b797d51a241f128b'), 'test':\n ('lt-ud-test.conllu', 'def54d6caf97610eb4ca8c0179d661c8eab98951'),\n 'train': ('lt-ud-train.conllu',\n '13fe42a3d21f17a5cad5aaf38692619c7713e177')}, 'mr': {'dev': (\n 'mr-ud-dev.conllu', 'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'), 'test':\n ('mr-ud-test.conllu', 'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),\n 'train': ('mr-ud-train.conllu',\n '24a1370184054a7f5af647997dca783d6c571242')}, 'sme': {'sample': (\n 'sme-ud-sample.conllu', '8c456f06b363c4d273fc454a49505f783f00fe43'),\n 'test': ('sme-ud-test.conllu',\n '6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'), 'train': (\n 'sme-ud-train.conllu', '203eab4183fd585efe3fea7e6df493a6746b0a9f')},\n 'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',\n '3a1aa6646ee62c605a6e5a7b535434ce93d0581f'), 'test': (\n 'no_bokmaal-ud-test.conllu', '18336ef0e4877ae28eb7d6019afe05b5a53245d5'\n ), 'train': ('no_bokmaal-ud-train.conllu',\n 'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')}, 'no_nynorsk': {'dev': (\n 'no_nynorsk-ud-dev.conllu', 
'5b95a070d11a61a23fc340ecbbbbb70f86884498'),\n 'test': ('no_nynorsk-ud-test.conllu',\n '3eaab8e4af82de2333521e9be0954ffaf6b1440b'), 'train': (\n 'no_nynorsk-ud-train.conllu',\n '79319993097c30ddf28d4c1137b8662f4f35d17e')}, 'no_nynorsklia': {'dev':\n ('no_nynorsklia-ud-dev.conllu',\n 'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'), 'test': (\n 'no_nynorsklia-ud-test.conllu',\n 'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')}, 'cu': {'dev': (\n 'cu-ud-dev.conllu', '0b67035ed5ca52aeefae443611232ed202fb990a'), 'test':\n ('cu-ud-test.conllu', '0fed872a5a2480b601c67ebbecf8dcd680b6863b'),\n 'train': ('cu-ud-train.conllu',\n '1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')}, 'fa': {'dev': (\n 'fa-ud-dev.conllu', '098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'), 'test':\n ('fa-ud-test.conllu', '0024aa6bad5eceed2e36f77d88578304a5886a80'),\n 'train': ('fa-ud-train.conllu',\n '1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')}, 'pl': {'dev': (\n 'pl-ud-dev.conllu', 'b7af7bee091feb0788eb9793a7102972006421dc'), 'test':\n ('pl-ud-test.conllu', 'e141e793ba35f8a08510ec1ce494099b5c800ca8'),\n 'train': ('pl-ud-train.conllu',\n 'f2227ba184a5030fc47b1aff732e04ae11b9ab94')}, 'pt_br': {'dev': (\n 'pt_br-ud-dev.conllu', '8eedc77096a87fe8ab251100d460780e161e5397'),\n 'test': ('pt_br-ud-test.conllu',\n '37a64e3acef107b62ab62ce478fc36ed112fb58f'), 'train': (\n 'pt_br-ud-train.conllu', '023cafcb6959d52298ad619f7838f26db9798aa9')},\n 'pt_pud': {'test': ('pt_pud-ud-test.conllu',\n '4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')}, 'pt': {'dev': (\n 'pt-ud-dev.conllu', '2171b4ac2b0726c9dfae6adf394b76be927accab'), 'test':\n ('pt-ud-test.conllu', '9e819a4592db42905806141d6fca3b7b20396ce3'),\n 'train': ('pt-ud-train.conllu',\n 'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')}, 'ro_nonstandard': {'test':\n ('ro_nonstandard-ud-test.conllu',\n '300d53091412dc5700dc5cad0fd3e136f7c8cb11'), 'train': (\n 'ro_nonstandard-ud-train.conllu',\n 'ed97f51129b63857627f838f68f41c9ef8541686')}, 'ro': {'dev': (\n 'ro-ud-dev.conllu', 'a320e29582e837fa48bbe0aab8e205cadfcb4a02'), 'test':\n ('ro-ud-test.conllu', '0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),\n 'train': ('ro-ud-train.conllu',\n '74beb2aa92d2fca50dbb1a4f716b936afb436ab9')}, 'ru_pud': {'test': (\n 'ru_pud-ud-test.conllu', 'bca81ce7aaf3cb8add98b19faecc1d8303901631')},\n 'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',\n '304c6ec7fb5060583af5f890384e3a480f8c3ad5'), 'test': (\n 'ru_syntagrus-ud-test.conllu',\n 'c138e39b48dc1c66d106e68ee75c6fce28ef780c'), 'train': (\n 'ru_syntagrus-ud-train.conllu',\n '8fa56fa80845e4ad946189d1e7af228b5595e312')}, 'ru': {'dev': (\n 'ru-ud-dev.conllu', 'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'), 'test':\n ('ru-ud-test.conllu', 'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),\n 'train': ('ru-ud-train.conllu',\n 'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')}, 'sa': {'test': (\n 'sa-ud-test.conllu', 'fad3a03a6834884a092b1d326625c6f663e36636')}, 'sr':\n {'dev': ('sr-ud-dev.conllu', 'dcb9a242986285e83512ddaa4b3ada07c4cea17a'\n ), 'test': ('sr-ud-test.conllu',\n '0f0c9e394c440bb2dd514bdd6873d3ffef13821b'), 'train': (\n 'sr-ud-train.conllu', '97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},\n 'sk': {'dev': ('sk-ud-dev.conllu',\n 'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'), 'test': (\n 'sk-ud-test.conllu', '89af4581c5f9058809f48788eb635a92cda0603c'),\n 'train': ('sk-ud-train.conllu',\n '89e108093bbf5619578955fdadfe200cefd8cf01')}, 'sl_sst': {'dev': (\n 'sl_sst-ud-dev.conllu', 'c65ae82123af95ec11f47262546b5ab2fc5735e5'),\n 'test': ('sl_sst-ud-test.conllu',\n 
'144a0124c1181b49d0c542a4a6d4465e45545f3b'), 'train': (\n 'sl_sst-ud-train.conllu', '4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},\n 'sl': {'dev': ('sl-ud-dev.conllu',\n '0078572c19574d32defeae9924176da2dd701ede'), 'test': (\n 'sl-ud-test.conllu', '616ace00e25df99be8dd49b7bf7c48f1093df96a'),\n 'train': ('sl-ud-train.conllu',\n '1462ac69163b30cf1399527e95f686ebf91be2d3')}, 'es_ancora': {'dev': (\n 'es_ancora-ud-dev.conllu', '94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),\n 'test': ('es_ancora-ud-test.conllu',\n '8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'), 'train': (\n 'es_ancora-ud-train.conllu', '95d5bf7ad33304f3440ffb014ac094c4967c303f'\n )}, 'es_pud': {'test': ('es_pud-ud-test.conllu',\n 'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')}, 'es': {'dev': (\n 'es-ud-dev.conllu', '4cdb828c492c6b7707af0ab6c7fbf734f770630a'), 'test':\n ('es-ud-test.conllu', 'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),\n 'train': ('es-ud-train.conllu',\n '5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')}, 'sv_lines': {'dev': (\n 'sv_lines-ud-dev.conllu', '15f1a04d960518fe7bfee23ce227fc7b78d4b755'),\n 'test': ('sv_lines-ud-test.conllu',\n '843df4ea3ab4f551b1eaa661652a8d6489a81d41'), 'train': (\n 'sv_lines-ud-train.conllu', '16e3533bf174b36d728847a36a3600f16c63baa6')\n }, 'sv_pud': {'test': ('sv_pud-ud-test.conllu',\n '18dadac0c15468256b340835ebc0529facbe9b73')}, 'sv': {'dev': (\n 'sv-ud-dev.conllu', '6d14e1aae5c9ae37c35481c44c04bf74a4233455'), 'test':\n ('sv-ud-test.conllu', '7ead0f7b49508db0022c042195ac5925b611c5b7'),\n 'train': ('sv-ud-train.conllu',\n '68affb85efde6ed017eab1e998e9666108559e04')}, 'swl': {'dev': (\n 'swl-ud-dev.conllu', '828e0a08f12cabfa75f9dd2b53dba58606522a7c'),\n 'test': ('swl-ud-test.conllu',\n '674f76631cf16172d67b795ff92dfbb297eb4930'), 'train': (\n 'swl-ud-train.conllu', '46b721f9cae2d5ba43f818dd487600b0ce76362a')},\n 'ta': {'dev': ('ta-ud-dev.conllu',\n '4d01f555012ddc1976933d4d928e26470f71bfa1'), 'test': (\n 'ta-ud-test.conllu', 'e8db8816a98d8b7e81188786db7c405979a7e3c3'),\n 'train': ('ta-ud-train.conllu',\n '6753d8c7b1b016de39c087aab45056de6021c3ae')}, 'te': {'dev': (\n 'te-ud-dev.conllu', '29f46355d767e54e8565f76a063c43e95ead0fca'), 'test':\n ('te-ud-test.conllu', '50abe345d4ab5bae021cacd096266c57b00572b8'),\n 'train': ('te-ud-train.conllu',\n '1794469abe09e7364cda0d9764cf515dcb4a61b6')}, 'tr_pud': {'test': (\n 'tr_pud-ud-test.conllu', 'aae839e2476a2f149c98e0274d245d07a50dafaa')},\n 'tr': {'dev': ('tr-ud-dev.conllu',\n '421de4d8d0fbdda46750523bde72880414c134a3'), 'test': (\n 'tr-ud-test.conllu', 'b175f136f6f0271c494a58a1846971c4a07cda27'),\n 'train': ('tr-ud-train.conllu',\n '5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')}, 'uk': {'dev': (\n 'uk-ud-dev.conllu', '0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'), 'test':\n ('uk-ud-test.conllu', '46c88fd623894fabdafb01a826016c215e4f65cc'),\n 'train': ('uk-ud-train.conllu',\n 'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')}, 'hsb': {'sample': (\n 'hsb-ud-sample.conllu', '148eddbb19b06115ea54e17a3fca58e99a85cbd9'),\n 'test': ('hsb-ud-test.conllu',\n '3d319288b4c06395b2627980737131995949f770')}, 'ur': {'dev': (\n 'ur-ud-dev.conllu', 'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'), 'test':\n ('ur-ud-test.conllu', 'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),\n 'train': ('ur-ud-train.conllu',\n '488d65b394d0de264be1221614c09e541f92f9de')}, 'ug': {'dev': (\n 'ug-ud-dev.conllu', 'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'), 'test':\n ('ug-ud-test.conllu', '4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},\n 'vi': {'dev': ('vi-ud-dev.conllu',\n 
'1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'), 'test': (\n 'vi-ud-test.conllu', '1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),\n 'train': ('vi-ud-train.conllu',\n 'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}\n",
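(Editor's note, not part of the record above: the tables in this file, such as UD21_DATA_FILE_SHA1, map a dataset key to a (filename, SHA-1 digest) pair. A minimal sketch of how such a table is typically consumed — hashing a downloaded file and comparing against the expected digest — follows. The local path is an illustrative assumption; the expected digest is taken verbatim from the 'en'/'train' entry above.)

# Minimal sketch: verify a downloaded file against a checksum table entry.
# Assumes the file has already been downloaded to the current directory.
import hashlib

def sha1sum(path, chunk_size=1 << 20):
    """Return the hex SHA-1 digest of the file at `path`, read in chunks."""
    h = hashlib.sha1()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

# From UD21_DATA_FILE_SHA1['en']['train'] in the table above.
expected = '993c44f62104971fe2d056847349facbb7986258'
if sha1sum('en-ud-train.conllu') != expected:  # hypothetical local path
    raise IOError('en-ud-train.conllu is corrupted: SHA-1 mismatch.')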
"step-3": "# coding: utf-8\n\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=too-many-lines\n\"\"\"Constants.\"\"\"\n\nUNK_TOKEN = '<unk>'\n\nBOS_TOKEN = '<bos>'\n\nEOS_TOKEN = '<eos>'\n\nPAD_TOKEN = '<pad>'\n\nUNK_IDX = 0 # This should not be changed as long as serialized token\n # embeddings redistributed on S3 contain an unknown token.\n # Blame this code change and see commit for more context.\n\nLARGE_POSITIVE_FLOAT = 1e18\n\nLARGE_NEGATIVE_FLOAT = -LARGE_POSITIVE_FLOAT\n\nGLOVE_NPZ_SHA1 = \\\n {'glove.42B.300d': ('glove.42B.300d.npz',\n '7deee8f4860744db53ed9e50892effe9883e6d89'),\n 'glove.6B.100d': ('glove.6B.100d.npz',\n '01f80f202fcabcc3e0804898349087bfc191dd1c'),\n 'glove.6B.200d': ('glove.6B.200d.npz',\n '5e6e2bdab346c257f88d80d215d518e680d86e32'),\n 'glove.6B.300d': ('glove.6B.300d.npz',\n '1db264aa936be62f055dfb72854204450bdf4399'),\n 'glove.6B.50d': ('glove.6B.50d.npz',\n 'aa16be8d184399d2199f83fd62586f2c30497bfa'),\n 'glove.840B.300d': ('glove.840B.300d.npz',\n 'b4ba390c1154736e07c0e67d9180935f5930e83c'),\n 'glove.twitter.27B.100d': ('glove.twitter.27B.100d.npz',\n '0f7b82c223451d0002f79ba23596983cdbe0e2b1'),\n 'glove.twitter.27B.200d': ('glove.twitter.27B.200d.npz',\n '41cc2d26f58a54622ce96bf6c8434360ab524f20'),\n 'glove.twitter.27B.25d': ('glove.twitter.27B.25d.npz',\n '9f563d2f296995598cc46812b2fda05ad4c3c879'),\n 'glove.twitter.27B.50d': ('glove.twitter.27B.50d.npz',\n 'ce9959c056f2a0a780c468feeb4f823af51630e9')}\n\nFAST_TEXT_NPZ_SHA1 = \\\n {'crawl-300d-2M': ('crawl-300d-2M.npz',\n '9dd611a1fe280c63050cd546d3595400fc0eede4'),\n 'wiki.aa': ('wiki.aa.npz',\n '48f163b80eb37f1806142169d3d4c05cf75b7339'),\n 'wiki.ab': ('wiki.ab.npz',\n '860ceff119dd27e5b701b605879037c1310cbc3e'),\n 'wiki.ace': ('wiki.ace.npz',\n '62938287464040491719f56a6f521f8f808beee8'),\n 'wiki.ady': ('wiki.ady.npz',\n '646843afa260d018ed711df3f1ca9c3e000447b6'),\n 'wiki.af': ('wiki.af.npz',\n '7b14cd27690b67fea318d0bac2283c16430680e2'),\n 'wiki.ak': ('wiki.ak.npz',\n '20f309adad1c45958c97b6055d5838e05bbaea72'),\n 'wiki.als': ('wiki.als.npz',\n 'a8b03aa133c4f7da12fc27c2b167b7918b1e9805'),\n 'wiki.am': ('wiki.am.npz',\n 'ed3dd10cea64737f7a1623612ee099df9dc19f66'),\n 'wiki.ang': ('wiki.ang.npz',\n '8efe64706d9d6b8eae38b2c7ff0b277e20592bc7'),\n 'wiki.an': ('wiki.an.npz',\n '168046283c719ab96a29b1abae2e25a6575c7be8'),\n 'wiki.arc': ('wiki.arc.npz',\n '049021b7decea4bc009b12936e56b4dbf5b760e7'),\n 'wiki.ar': ('wiki.ar.npz',\n '7e325e1e98dfcdc9368d2ebe40ee834a2ed44912'),\n 'wiki.arz': ('wiki.arz.npz',\n '7d851c2c7be3ee6f7fd896de7b76ea08e3fb08b0'),\n 'wiki.as': ('wiki.as.npz',\n '01d38c29cd4bd99c1a8534abc058822da14a5b9c'),\n 'wiki.ast': ('wiki.ast.npz',\n '9c9846ba5084505a0adea89c95c66e04efbf5ce9'),\n 'wiki.av': ('wiki.av.npz',\n 
'7ef6a920c364638504e673cfde5f7675503fa81e'),\n 'wiki.ay': ('wiki.ay.npz',\n 'c1202e110930e3902397f5cb64a8359e013b469f'),\n 'wiki.azb': ('wiki.azb.npz',\n '10351b7ef14ec2cb610d290cb6a3f6987ef5d8b3'),\n 'wiki.az': ('wiki.az.npz',\n '74257c3bcd533a606afae509ea835dc036d61546'),\n 'wiki.ba': ('wiki.ba.npz',\n '4a2857ed694d66864df562b376c2fa12fcb03646'),\n 'wiki.bar': ('wiki.bar.npz',\n 'e65c6b7e9ff83798d1eea05d166148837d53e615'),\n 'wiki.bat_smg': ('wiki.bat_smg.npz',\n '6420584ae28ba6c9dd145fea8f096243d457c2d8'),\n 'wiki.bcl': ('wiki.bcl.npz',\n '33606c970ab336b678393e2bdb8af2116d11cf7b'),\n 'wiki.be': ('wiki.be.npz',\n '84487d341e333344cf71bc12c7a205d923762498'),\n 'wiki.bg': ('wiki.bg.npz',\n '56f2a175b1a1d1a9cf9f1cea277cd0b46ffd7f66'),\n 'wiki.bh': ('wiki.bh.npz',\n '07473989853a344a41aaa18f41030dc56d0d01c7'),\n 'wiki.bi': ('wiki.bi.npz',\n '08adfa3c9ef3016d30ef69ea539d217ff67eda09'),\n 'wiki.bjn': ('wiki.bjn.npz',\n '998a551283222931d3a26922308449950bfa3ec7'),\n 'wiki.bm': ('wiki.bm.npz',\n '454ff9fbd4790e4a076d9a2087a51da28aa1332f'),\n 'wiki.bn': ('wiki.bn.npz',\n '1f36f6f39c9a9b33bb8035c9a4dc7e04933604fd'),\n 'wiki.bo': ('wiki.bo.npz',\n 'b9fe87318428de0a7790de175b5fec80c5af482d'),\n 'wiki.bpy': ('wiki.bpy.npz',\n '5c7853173d27e2c018c24eca69de8d5f34511b0d'),\n 'wiki.br': ('wiki.br.npz',\n '7aa66a2034fbfaa1d39e637385d48610238797c9'),\n 'wiki.bs': ('wiki.bs.npz',\n 'a019a4677677c2e9e4d899326b2b6c15ad6c011a'),\n 'wiki.bug': ('wiki.bug.npz',\n '09ae3477941d7a99d1df494368d7efb0b2c18913'),\n 'wiki.bxr': ('wiki.bxr.npz',\n 'b832c691b8ddd95896c052d3d15e1f98d72068d5'),\n 'wiki.ca': ('wiki.ca.npz',\n '391e0d4daad08649251274fa1cc2a5f49c7728b1'),\n 'wiki.cbk_zam': ('wiki.cbk_zam.npz',\n '02e57a763bc9f9eadaba57953383dd12a0a78a37'),\n 'wiki.cdo': ('wiki.cdo.npz',\n 'd6e8f422327e8b2273f1f2662d793707ece6695d'),\n 'wiki.ceb': ('wiki.ceb.npz',\n '23bc0bb9aeaa57dff35092766941a866de142aae'),\n 'wiki.ce': ('wiki.ce.npz',\n '182b2a889256119a6d379d501c55c7621e5855db'),\n 'wiki.ch': ('wiki.ch.npz',\n '82dd77512fcb463481f43c9cef3507e2baa90d7b'),\n 'wiki.cho': ('wiki.cho.npz',\n 'b0b620fc2442d1a6e2440e71a424861c80175f0c'),\n 'wiki.chr': ('wiki.chr.npz',\n '3d62c6b95c5af46abd6234426ae760cca65d5bd0'),\n 'wiki.chy': ('wiki.chy.npz',\n '34a28a22da79aebc100e3714b825c95c8d5f54a3'),\n 'wiki.ckb': ('wiki.ckb.npz',\n 'ad19461e4be583d08b7693ff5b1e9d590ed41add'),\n 'wiki.co': ('wiki.co.npz',\n 'fa60d9f0e79f1c7e15f381aef983a0f4f31c05a8'),\n 'wiki.crh': ('wiki.crh.npz',\n '540270ba6edd9d7b2f7efca52b3b407524ac67d1'),\n 'wiki.cr': ('wiki.cr.npz',\n 'f06b77465a38ec960d7d5a7554b848c37e945c76'),\n 'wiki.csb': ('wiki.csb.npz',\n 'b8b28559cf2541341af98e2aa755856765bdeabf'),\n 'wiki.cs': ('wiki.cs.npz',\n '19881e931fe06abf341450f00c342d364313e232'),\n 'wiki.cu': ('wiki.cu.npz',\n '731e0d00abd53bc2a8eb6cf37f6ab883cff34e15'),\n 'wiki.cv': ('wiki.cv.npz',\n 'e60034fcffb7dfef7b236ddba1194c3aa20b7967'),\n 'wiki.cy': ('wiki.cy.npz',\n '5a0fb967b5556f007c0d5065f951a3d3b1c1005a'),\n 'wiki.da': ('wiki.da.npz',\n 'd06258014ba2c7450bc2d55edfdf1731433e42e5'),\n 'wiki.de': ('wiki.de.npz',\n 'a21694dfd2af63bd7bb00f0b60b28e88bd1153f1'),\n 'wiki.diq': ('wiki.diq.npz',\n '4f6c77a86b39834a7130419967759afd8cc26b84'),\n 'wiki.dsb': ('wiki.dsb.npz',\n 'e74f1d346a8db96987bff0c33ee5f886907c380a'),\n 'wiki.dv': ('wiki.dv.npz',\n '5d6fe6f0eec2e7704121d5aba03b4edbb28af873'),\n 'wiki.dz': ('wiki.dz.npz',\n '77c639d36d0355b2de5adead7996eae342b852a6'),\n 'wiki.ee': ('wiki.ee.npz',\n '4b5a76127d57515d3e8a76787cdefde5856b754a'),\n 'wiki.el': ('wiki.el.npz',\n 
'a00bcb97e7898931196a1c69f7a492e5b6202661'),\n 'wiki.eml': ('wiki.eml.npz',\n 'b475d626b3d97e7a68c02827fdc7900599e838c6'),\n 'wiki.en': ('wiki.en.npz',\n 'ad5ec6d49db6c6fe76b8e85ff05d34e5d0e1eb6a'),\n 'wiki.eo': ('wiki.eo.npz',\n '18049b0010520d13e676f5a82e8bb90153d99003'),\n 'wiki.es': ('wiki.es.npz',\n 'a6d192ba7d82d762f8367e75ca951aad4d11e410'),\n 'wiki.et': ('wiki.et.npz',\n '4beb7025cf88f1aa62d025b187f0cb09aee61858'),\n 'wiki.eu': ('wiki.eu.npz',\n '5e1a8197e35f20a2476798bbb935b4c131289c4f'),\n 'wiki.ext': ('wiki.ext.npz',\n '049b2d1b0a8b102b45907cf487cac30aa294e0a0'),\n 'wiki.fa': ('wiki.fa.npz',\n '81ed274997c87ef87d73d25e166ca06272ce426f'),\n 'wiki.ff': ('wiki.ff.npz',\n '4867dc74cd53ca0b0f769af4fa1ea420406b59bf'),\n 'wiki.fi': ('wiki.fi.npz',\n '6d1291b854045179f8171ac7d62ede7d8ac159a2'),\n 'wiki.fiu_vro': ('wiki.fiu_vro.npz',\n 'dd87806d9dc8833fa0e21e35a50815ebdbaa6c8b'),\n 'wiki.fj': ('wiki.fj.npz',\n 'cf5c31b0a69276f5dd18ab738ed92444abaeb755'),\n 'wiki.fo': ('wiki.fo.npz',\n 'ffc19807d528af000861a94cfb8097bd686e14fc'),\n 'wiki.fr': ('wiki.fr.npz',\n '8f06d5dbe3cf7214354fe9b2f6eca0ef7419f063'),\n 'wiki.frp': ('wiki.frp.npz',\n 'c8b200ae592478d3cd0bfaafcd7aa19de8a3bfe5'),\n 'wiki.frr': ('wiki.frr.npz',\n 'fa5e5c39ea2a45793c679eacea290a35e37405ea'),\n 'wiki.fur': ('wiki.fur.npz',\n 'a61a8940d059f25000e3fe23933e5ed0d37e65d3'),\n 'wiki.fy': ('wiki.fy.npz',\n '46f9f41bdf6f4fb8e27a753290413d745465963b'),\n 'wiki.gag': ('wiki.gag.npz',\n '49fb01230e6803544122d47ab7d3fe694d1444f2'),\n 'wiki.gan': ('wiki.gan.npz',\n '716b7b26acc15975f30caf3c6effa111516fcca5'),\n 'wiki.ga': ('wiki.ga.npz',\n 'ea934bc1fdc1acf6caf9ac746c6c499251f1fdee'),\n 'wiki.gd': ('wiki.gd.npz',\n '597017b5a32d933f194595d3656f858e37e70a62'),\n 'wiki.glk': ('wiki.glk.npz',\n '91a5834658bc2d48714e8807ef24efb79567b4b5'),\n 'wiki.gl': ('wiki.gl.npz',\n '2fa8e48d6ae1e9c9d542eb3f2156cf9e359e66c2'),\n 'wiki.gn': ('wiki.gn.npz',\n 'e359eef3928e1f1b5d8fcf0ea532e8794c66289a'),\n 'wiki.gom': ('wiki.gom.npz',\n '8cd361481c23f7545cc2bd8f1bf22aa7400edd4d'),\n 'wiki.got': ('wiki.got.npz',\n 'd05daf105611150695e61775fdff2c500b36be3f'),\n 'wiki.gu': ('wiki.gu.npz',\n '0ce175c5fc39bab4032892f70c9d2bb850af0f4a'),\n 'wiki.gv': ('wiki.gv.npz',\n '2c573f873d607831ff01b64603c17b8db79bd7e1'),\n 'wiki.hak': ('wiki.hak.npz',\n 'e6048727799cdf149f5c50037e0fc59300d33a94'),\n 'wiki.ha': ('wiki.ha.npz',\n 'f18ea7286bbd390c5470896b2c99cb1adc740064'),\n 'wiki.haw': ('wiki.haw.npz',\n '18bcd85d2e06b1b889f0835fc5b62697fdf32d72'),\n 'wiki.he': ('wiki.he.npz',\n '76915ff167b6ecb7b7e22ff0ca46914a55d344af'),\n 'wiki.hif': ('wiki.hif.npz',\n '12153aaf98d76d5502ab77a27cd0b9a539f61513'),\n 'wiki.hi': ('wiki.hi.npz',\n '249666a598991f6ec147954c6af9e531fd1cd94e'),\n 'wiki.ho': ('wiki.ho.npz',\n '3f804fd69780c0789708b56ea9d48715f8e38f26'),\n 'wiki.hr': ('wiki.hr.npz',\n '9a3de28e69f97048bfb480b4f83eaab6149f66ad'),\n 'wiki.hsb': ('wiki.hsb.npz',\n '7070bf64e13299dd66ac0e9f8e24011a56b6bfe8'),\n 'wiki.ht': ('wiki.ht.npz',\n 'a607093d511afeb584d02dc676bc5a27eff66287'),\n 'wiki.hu': ('wiki.hu.npz',\n '9b2c4750daf1bcf39768572e874b5afda0e2f0bc'),\n 'wiki.hy': ('wiki.hy.npz',\n 'ec0461a102a6fb00bd324f66cefd3c8d55a7093a'),\n 'wiki.hz': ('wiki.hz.npz',\n '5dfb8afbdae6b4148c3e55ab459c56a74b46b463'),\n 'wiki.ia': ('wiki.ia.npz',\n '4cfaaf053b9513bbf5b2423258c0f01d20256de6'),\n 'wiki.id': ('wiki.id.npz',\n 'bace396bb9941cc9e5b2e5f5a19be6db833c5fd4'),\n 'wiki.ie': ('wiki.ie.npz',\n '1bae7256c2e763ce6d692d1c0a603d99a8b22826'),\n 'wiki.ig': ('wiki.ig.npz',\n 
'23128e54a5e143891d392d621723bad9cfc8cf7b'),\n 'wiki.ii': ('wiki.ii.npz',\n '54bc16d05da512481865a89ecf30260b0acc04dc'),\n 'wiki.ik': ('wiki.ik.npz',\n 'f8015227e893d2375699b7d132b306ba381f02ac'),\n 'wiki.ilo': ('wiki.ilo.npz',\n '185a11f81bd5d24a34558dda81ee4735f5ba150b'),\n 'wiki.io': ('wiki.io.npz',\n 'ddf8180a90aa6ee5be93a2582cc99c535f21363e'),\n 'wiki.is': ('wiki.is.npz',\n '968f8dd2a093b279a6f7aaa734008454bf51d724'),\n 'wiki.it': ('wiki.it.npz',\n 'fdfb857a309b2c3d29482bb5cc55f21b858d2e6f'),\n 'wiki.iu': ('wiki.iu.npz',\n 'fa8896730bd6c24c3473daa22116d1016294e7f7'),\n 'wiki.jam': ('wiki.jam.npz',\n 'a8f0d0b99c89ace0a6401b8fcda261d06065faaf'),\n 'wiki.ja': ('wiki.ja.npz',\n '8d42e5a40e4d1d8645b2d80b873a65cadcf68b5c'),\n 'wiki.jbo': ('wiki.jbo.npz',\n '145fc999ab004b348cf9bf445f0a93a7a145308b'),\n 'wiki.jv': ('wiki.jv.npz',\n '66978770bf06e42414395cf5fd8c596044d72bec'),\n 'wiki.kaa': ('wiki.kaa.npz',\n '624a640ecb9901b2aba2e9f44ab615146ecb2862'),\n 'wiki.kab': ('wiki.kab.npz',\n 'e97f93b6ba65e95c85b7541932cf53c5ad9eb896'),\n 'wiki.ka': ('wiki.ka.npz',\n '1ca8376e1e0cbd58001c1b51a2d488a2874a6743'),\n 'wiki.kbd': ('wiki.kbd.npz',\n 'f2d2a05b06723ac549784ad5470d84f5742a1352'),\n 'wiki.kg': ('wiki.kg.npz',\n 'fa7f6d5f660a173a3e75342d449980eedcdc789e'),\n 'wiki.ki': ('wiki.ki.npz',\n '21a8c7c616c0050c51c288861f3423f313e4f634'),\n 'wiki.kj': ('wiki.kj.npz',\n 'f3c347509a0d81f4f7fdbb8b22889b8d76e5014e'),\n 'wiki.kk': ('wiki.kk.npz',\n 'bc24a3289e1c1e18e16b6789c2f9f92af1e73071'),\n 'wiki.kl': ('wiki.kl.npz',\n 'b8b7e7359f067836e2be2ecfe9f35a820b00fe1d'),\n 'wiki.km': ('wiki.km.npz',\n 'e053799fd01463808432dc035bef3e36620e2f36'),\n 'wiki.kn': ('wiki.kn.npz',\n '2849a0a8b3453e9bf6af05d4c7bd3db881dd1068'),\n 'wiki.koi': ('wiki.koi.npz',\n 'a9b02e9bd41833bcd54769f94626019c03f29997'),\n 'wiki.ko': ('wiki.ko.npz',\n '764d9896e74b5a26c6884d48bce3bed8ed3a7822'),\n 'wiki.krc': ('wiki.krc.npz',\n 'bfe39598c718f1cc95909db7544b3214b308a97c'),\n 'wiki.kr': ('wiki.kr.npz',\n '1e6af853d4a8ea7830e116eb9b61ac5d7d9a315c'),\n 'wiki.ksh': ('wiki.ksh.npz',\n '66cd0e3e0a0b0282a13960571ebe7cddd7706bf2'),\n 'wiki.ks': ('wiki.ks.npz',\n '85f1adaa05b854df4dede745a1aaab3836e60770'),\n 'wiki.ku': ('wiki.ku.npz',\n 'faf90584e5a45e6d0f9eeb88399b82abe037d584'),\n 'wiki.kv': ('wiki.kv.npz',\n '9f2b41822013a412da9c99fac06eed8be03ca192'),\n 'wiki.kw': ('wiki.kw.npz',\n '3eed8a8fc97a2fc79241b8474a458c98d00fc897'),\n 'wiki.ky': ('wiki.ky.npz',\n '0116ff90f10a6c0728e1ea86d8a44896ea83270a'),\n 'wiki.lad': ('wiki.lad.npz',\n '5af2015b3d1c5e8563f0e92721580988ebe2ce50'),\n 'wiki.la': ('wiki.la.npz',\n '7143303a3ea13c7668eb90ea6e3d2ca69857a3be'),\n 'wiki.lbe': ('wiki.lbe.npz',\n 'f206a3c35a184ba5d2b32ee68640eadf66c847da'),\n 'wiki.lb': ('wiki.lb.npz',\n '143dc6337f3690379282034c460c613d7f144923'),\n 'wiki.lez': ('wiki.lez.npz',\n 'b29a680decc6b29f24e8eb9e4f8e11e3419d45f1'),\n 'wiki.lg': ('wiki.lg.npz',\n '866640ce62cedbc1d453b7ea3c289c291ad76e13'),\n 'wiki.lij': ('wiki.lij.npz',\n '0dcd3d7009ae89b1016ca6cdb99a9f0d70bc4baf'),\n 'wiki.li': ('wiki.li.npz',\n '4666b3c238256d7b7623a136db19b8b9f4754734'),\n 'wiki.lmo': ('wiki.lmo.npz',\n 'ac89fa7cfe0675950bcb31c66bf3f88a3cfc98f0'),\n 'wiki.ln': ('wiki.ln.npz',\n 'fba158719944aabe58e0002a90be0ed77e11702d'),\n 'wiki.lo': ('wiki.lo.npz',\n '1e113e340a8a93d385e14502c9c4e3bcdf6c3101'),\n 'wiki.lrc': ('wiki.lrc.npz',\n '42cb755f398fba6f0da7949c91e92b55654bd482'),\n 'wiki.ltg': ('wiki.ltg.npz',\n '182f75859e228d1162215f28fe7f2dca127624a4'),\n 'wiki.lt': ('wiki.lt.npz',\n 
'66aa944bd2e777cb82d6d59b1f2f837b6c48cb37'),\n 'wiki.lv': ('wiki.lv.npz',\n '2be8f926da85694fa998bf79d80b61ebb8d67576'),\n 'wiki.mai': ('wiki.mai.npz',\n 'b8a9c36e2a0f1bb84a44dc762250d2a9007ef637'),\n 'wiki.map_bms': ('wiki.map_bms.npz',\n '6f0394d6b3d08a946e3df4b9355efe94148f018a'),\n 'wiki.mdf': ('wiki.mdf.npz',\n '774ee35334641db57f9ac9069961c5372a5d92e8'),\n 'wiki.mg': ('wiki.mg.npz',\n '496c48ef668f08ce95ebb11ce1ce5026b52d935c'),\n 'wiki.mh': ('wiki.mh.npz',\n '352edd84f99c5aa277a7306f6cacea1fab065ed3'),\n 'wiki.mhr': ('wiki.mhr.npz',\n 'dd78b27a674ac10411cdf74ac32f9391506b17e0'),\n 'wiki.min': ('wiki.min.npz',\n '628b406441ab03bc8aa68195ada50bfdc8226f34'),\n 'wiki.mi': ('wiki.mi.npz',\n '754127b473861cd4f9ae034c9f527a34827b1f00'),\n 'wiki.mk': ('wiki.mk.npz',\n 'b09fed4f56c296f13c4020ef1fec498382a38b73'),\n 'wiki.ml': ('wiki.ml.npz',\n '02fb55d97ca2f0408f0e7e8dd6a661bbc3319a2a'),\n 'wiki.mn': ('wiki.mn.npz',\n '08b2c45689aa5d9ec49df96dc7c777ce9b9a0b4b'),\n 'wiki.mo': ('wiki.mo.npz',\n '638c2e8bd2352fd52921b9ae62f578b8357bab49'),\n 'wiki.mrj': ('wiki.mrj.npz',\n 'ec5cf1f4fb8dfdca64d8172974e620eb8fa41626'),\n 'wiki.mr': ('wiki.mr.npz',\n '074dd68c947c2f137a3e84b55012925f00213139'),\n 'wiki.ms': ('wiki.ms.npz',\n '3dbe9e9d70251de8a374776ff1250a9c3103ee59'),\n 'wiki.mt': ('wiki.mt.npz',\n 'f5103998a68d1b178387417436a83123d44aba01'),\n 'wiki.multi.ar': ('wiki.multi.ar.npz',\n 'a010d1d81a465c56ebaf596b3e8e8795e7f0f8e3'),\n 'wiki.multi.bg': ('wiki.multi.bg.npz',\n 'c04018f3a600cee170f12a36cdd35b4727a2aade'),\n 'wiki.multi.ca': ('wiki.multi.ca.npz',\n 'eef52a0cf20c133ca9065de25f0702861a8cfa29'),\n 'wiki.multi.cs': ('wiki.multi.cs.npz',\n 'c5f547aa78c0e3d7dae67a0334d500bf2a86aa30'),\n 'wiki.multi.da': ('wiki.multi.da.npz',\n '24374f2ee169b33327feeee46da31b0de1622fe4'),\n 'wiki.multi.de': ('wiki.multi.de.npz',\n '2e6c119b345bebd34b56eaaf855d6703889b11f7'),\n 'wiki.multi.el': ('wiki.multi.el.npz',\n '9d122beedb80a2e5334946641e5bafd32c01e76b'),\n 'wiki.multi.en': ('wiki.multi.en.npz',\n '8c3c480b4cb2690304173713a646280613b244a8'),\n 'wiki.multi.es': ('wiki.multi.es.npz',\n '483a22656e4fb2a01e9f4ef8156b261e780850ab'),\n 'wiki.multi.et': ('wiki.multi.et.npz',\n '22498c7b91645a3874fa738b5cfb16bf98b6f97c'),\n 'wiki.multi.fi': ('wiki.multi.fi.npz',\n '765a6f0b63777bff4ae6ca2b461c5889c03d6a70'),\n 'wiki.multi.fr': ('wiki.multi.fr.npz',\n 'decd9aacf600114b8a36072535c0309874a37c83'),\n 'wiki.multi.he': ('wiki.multi.he.npz',\n '7eee940c1b85936f59122f4b1a166223dd946674'),\n 'wiki.multi.hr': ('wiki.multi.hr.npz',\n '1673963416af088f8bf15576afb33d58115db35c'),\n 'wiki.multi.hu': ('wiki.multi.hu.npz',\n 'a1fbe6ededf3cbaa3eaa22dd8b20cce4b36cfc6d'),\n 'wiki.multi.id': ('wiki.multi.id.npz',\n '6c3e721febb511ede7db7bf978d65769e4270f5c'),\n 'wiki.multi.it': ('wiki.multi.it.npz',\n 'fc5bfc11e0165e8d95c1708573dad5e456826c73'),\n 'wiki.multi.mk': ('wiki.multi.mk.npz',\n '6cd50198355674f156fc863108d9bebf11cfabd9'),\n 'wiki.multi.nl': ('wiki.multi.nl.npz',\n '4fa06b9230c95dfa5a9e9a5d80f1f5ba614d3cbf'),\n 'wiki.multi.no': ('wiki.multi.no.npz',\n '63756168c1101e73fba8d1a5015f32b8892819e6'),\n 'wiki.multi.pl': ('wiki.multi.pl.npz',\n '958b8e8bead965ba1bb1433e1c960fc3e12a10fb'),\n 'wiki.multi.pt': ('wiki.multi.pt.npz',\n '22f07df1609d79b95344ee575ea43141424a1528'),\n 'wiki.multi.ro': ('wiki.multi.ro.npz',\n '73180b3e382519004bf38ea7b86237aacbbe813a'),\n 'wiki.multi.ru': ('wiki.multi.ru.npz',\n '3b2eb9163f35e90bf2ce1cd3c997b354d0c34f59'),\n 'wiki.multi.sk': ('wiki.multi.sk.npz',\n 
'606a0c3ba9849070c6b6b8c22d920fdeed9a1385'),\n 'wiki.multi.sl': ('wiki.multi.sl.npz',\n '3cfdab5043b8cfe1535cb6dbd4c9e68847ad5904'),\n 'wiki.multi.sv': ('wiki.multi.sv.npz',\n '4f1494885b9a831e87cfa3c15f2204c4a73c0779'),\n 'wiki.multi.tr': ('wiki.multi.tr.npz',\n '54f90d5ddb9a65538a41e37c5a67ed933a5e4885'),\n 'wiki.multi.uk': ('wiki.multi.uk.npz',\n '500fd26b1d7a25b42458012e99f9f76642e0c787'),\n 'wiki.multi.vi': ('wiki.multi.vi.npz',\n '3955809cceb300965c15f9372221417719bb0db8'),\n 'wiki.mus': ('wiki.mus.npz',\n 'a5f48934a3fa6eaf4929098046c93fc94dd6bcb6'),\n 'wiki.mwl': ('wiki.mwl.npz',\n '8a5e2c272166f8a72c5694ca6c3104d5f49179ec'),\n 'wiki.my': ('wiki.my.npz',\n '5e035aca16700d7d6695af8a6d3a88ac847aaeb7'),\n 'wiki.myv': ('wiki.myv.npz',\n 'd4cfaab70c640033e02c0fc0c5a3615ae836c569'),\n 'wiki.mzn': ('wiki.mzn.npz',\n 'ad09ac584ae455b5862b95125ef409360ae18445'),\n 'wiki.nah': ('wiki.nah.npz',\n '2dc454ef37d059f2053af46cfa1f4f0ca939cba0'),\n 'wiki.na': ('wiki.na.npz',\n '401f0f880eb7aa78d21348bc1e0a3953b3e81bf0'),\n 'wiki.nap': ('wiki.nap.npz',\n '996da46aeeab5644ba766d00c5e343b1553361d7'),\n 'wiki.nds_nl': ('wiki.nds_nl.npz',\n '5a9307e16b13a5a82ec19a52b33254537e7198e7'),\n 'wiki.nds': ('wiki.nds.npz',\n 'b249a87c78c52becf51e7b50aaf9f9b6a36585f1'),\n 'wiki.ne': ('wiki.ne.npz',\n 'a601db2647a74ffd2b4b43dcb8584735f555459c'),\n 'wiki.new': ('wiki.new.npz',\n 'c398a3775aba9c68ce765cfdfb6b188f7c47e4c6'),\n 'wiki-news-300d-1M': ('wiki-news-300d-1M.npz',\n '0a03bbd508e5381e140476140fb121afeb0050ed'),\n 'wiki-news-300d-1M-subword': ('wiki-news-300d-1M-subword.npz',\n '69edae21375407781c727dcb9e534e79d712d137'),\n 'wiki.ng': ('wiki.ng.npz',\n 'befd774d15f69d43547e13e5ea3a97c4cb1ab405'),\n 'wiki.nl': ('wiki.nl.npz',\n '5a7cb6f1dd0a7621202abba9461ac2c5bf905219'),\n 'wiki.nn': ('wiki.nn.npz',\n '8e5059ddeb24050fadaa5cc4622b13feb3e4a226'),\n 'wiki.no': ('wiki.no.npz',\n '5ce6e0f793e66f081652f64013968099de03d9f9'),\n 'wiki.nov': ('wiki.nov.npz',\n '95ed23b4cfd7a65afa1c12c7dbdce6af53923d77'),\n 'wiki.vec': ('wiki.vec.npz',\n '08ebb912efeb9df1c7d05e1af90484d210dff47e'),\n 'wiki.nrm': ('wiki.nrm.npz',\n 'e58614b4508ff9810f0b58fd818f973775bc918d'),\n 'wiki.nso': ('wiki.nso.npz',\n '56a2ebe260241402d117cd89c5c872b9c96ff05b'),\n 'wiki.nv': ('wiki.nv.npz',\n 'c713051fe03ec1f60314bb42161b2a47fb5e169a'),\n 'wiki.ny': ('wiki.ny.npz',\n 'ba5a1725955cbc13e7fd93ab499f8085840c992c'),\n 'wiki.oc': ('wiki.oc.npz',\n '259e7d994c38a4cfc140fb07016b82d6781e5027'),\n 'wiki.olo': ('wiki.olo.npz',\n '0fea70f887def4779ee70a79366b88f1ada65004'),\n 'wiki.om': ('wiki.om.npz',\n '47e2d756b5f8913085d901375c1b4e0b118a4221'),\n 'wiki.or': ('wiki.or.npz',\n '7e274ab060219b019aa02bb97941cc6e162fd01f'),\n 'wiki.os': ('wiki.os.npz',\n '19e8199cc2aaffdb07b6c558dbc5465ac6e03155'),\n 'wiki.pag': ('wiki.pag.npz',\n 'eddf4931547649026c02f893297ef673ec6158bb'),\n 'wiki.pam': ('wiki.pam.npz',\n '40109aa174bd9f0fa657839bb548e2b0646c58d3'),\n 'wiki.pa': ('wiki.pa.npz',\n '8a5870717e9e641b1f757f13259171698118de2e'),\n 'wiki.pap': ('wiki.pap.npz',\n '999c8e5b005ca20d9998fbbe4fa79177f69e24c0'),\n 'wiki.pcd': ('wiki.pcd.npz',\n 'e975066b323a65cdc5e4c27138ef674d2cf7250b'),\n 'wiki.pdc': ('wiki.pdc.npz',\n '5c770b9d56f276b0aa535845f175c05ee1cea615'),\n 'wiki.pfl': ('wiki.pfl.npz',\n '0063d0b633ee529a75482b36ed4f4da7d64994ec'),\n 'wiki.pih': ('wiki.pih.npz',\n 'ce1d76c94d248545eea0d7436c54849dbb380bfc'),\n 'wiki.pi': ('wiki.pi.npz',\n 'c7d56c334bf529f8b3655693d207a80feaec4aed'),\n 'wiki.pl': ('wiki.pl.npz',\n '0d612fdf871a1a4084c867f394940475be899443'),\n 
'wiki.pms': ('wiki.pms.npz',\n 'ca149a2fb138011315bb6d5d61c7a5647e515e51'),\n 'wiki.pnb': ('wiki.pnb.npz',\n '9ec82d02ad8894056c67991cf8ce927bcca74ee2'),\n 'wiki.pnt': ('wiki.pnt.npz',\n '3f90123407bb8fc838a0a0d3700a14e15f5b26aa'),\n 'wiki.ps': ('wiki.ps.npz',\n '7edebc02ac16f5fab83eb10b7d0fab821a9a4d43'),\n 'wiki.pt': ('wiki.pt.npz',\n 'f172fd801edd1ad9d319ba44146d40b5d682a473'),\n 'wiki.qu': ('wiki.qu.npz',\n '68bec60ccfe1826c3b3a8968574488dbc74cdf7b'),\n 'wiki.rm': ('wiki.rm.npz',\n '00fb191fc736ba60cb23e76169dfccde9a9daad0'),\n 'wiki.rmy': ('wiki.rmy.npz',\n 'c5e93cc37ff7293b9a1d9fe55c42d6fbde372b97'),\n 'wiki.rn': ('wiki.rn.npz',\n '57b8e0d6999269be227af6ef2797a9cf8386ff1b'),\n 'wiki.roa_rup': ('wiki.roa_rup.npz',\n 'e06d6b5672a59bb9e83143bc8b28300d23c09546'),\n 'wiki.roa_tara': ('wiki.roa_tara.npz',\n 'c083105f40236dc3711f06c1b40e8ee7a714b99d'),\n 'wiki.ro': ('wiki.ro.npz',\n '766bc0cb58a65b0b1763b9a0d90e91ab982eb20d'),\n 'wiki.rue': ('wiki.rue.npz',\n '9a91fa093cd48d7d658d526b0ccda48dc59cd7f4'),\n 'wiki.ru': ('wiki.ru.npz',\n 'd59d099481c22d5592ab9635c9ee48060aa0bf45'),\n 'wiki.rw': ('wiki.rw.npz',\n 'e99ee87d249f6c157c5c97397d1025d798b85c69'),\n 'wiki.sah': ('wiki.sah.npz',\n '85dae39097b29bc8e2b64f343a77794e4a62f91a'),\n 'wiki.sa': ('wiki.sa.npz',\n '7d1928d7c67400045ac1b35a37a0e3089690d875'),\n 'wiki.scn': ('wiki.scn.npz',\n '27d7b8050bbeed8ce196061c610216760b053c39'),\n 'wiki.sc': ('wiki.sc.npz',\n '69c7b8be0f03a1bbd615695f93bdd78f96a58e16'),\n 'wiki.sco': ('wiki.sco.npz',\n '4880282f59d3338b67fbff75359e2d24896e95bb'),\n 'wiki.sd': ('wiki.sd.npz',\n '0ed8da4d27223db717a612cf0c88582351db6e19'),\n 'wiki.se': ('wiki.se.npz',\n '0f4b2e060d5e29f96ca73aab29c967e79db69c17'),\n 'wiki.sg': ('wiki.sg.npz',\n 'a5e4edf34fe1a88b322da4c3922ec5a470e200c6'),\n 'wiki.sh': ('wiki.sh.npz',\n 'c13f1e94676bc939560193f7aa7ffd7d604707b3'),\n 'wiki.simple': ('wiki.simple.npz',\n '352d0575e7d60b08e1dfce2c5de713906f0ed78f'),\n 'wiki.si': ('wiki.si.npz',\n '204f9ffbe7770a9f56d3b2fb26999165015f5c33'),\n 'wiki.sk': ('wiki.sk.npz',\n '7a9820b5a343b242660bf2595d1ecbf6e00a76d6'),\n 'wiki.sl': ('wiki.sl.npz',\n '85f3186f26d6725317a64e290363a7251b928b81'),\n 'wiki.sm': ('wiki.sm.npz',\n '9e13452cc4bff677f4f15db04f9d2f95f6ec054c'),\n 'wiki.sn': ('wiki.sn.npz',\n 'e8d5f7dcf51280c5f99bc3df849b4889a61e9fcd'),\n 'wiki.so': ('wiki.so.npz',\n '0f5d71b95768b33fd939a870c15344c4478364a9'),\n 'wiki.sq': ('wiki.sq.npz',\n '8b05826df8575e65c87a2fc0b7630cf644d4216d'),\n 'wiki.srn': ('wiki.srn.npz',\n '2711396ef297ac5dde8904508bc002bdecbcc6f4'),\n 'wiki.sr': ('wiki.sr.npz',\n '546edc8e29a5d2e99ed10eb4a552cbef2bb8f417'),\n 'wiki.ss': ('wiki.ss.npz',\n '2e5911bad79bb5270a64f587e326d31c95ec58f3'),\n 'wiki.st': ('wiki.st.npz',\n '23bc954719a2962e891f02efaea754c9ea025894'),\n 'wiki.stq': ('wiki.stq.npz',\n 'dd3ece0c0aa30e53ae0f4b558309bb60ab628652'),\n 'wiki.su': ('wiki.su.npz',\n '7e48732e8a1fcf212e692924a4416a6ac3b3b055'),\n 'wiki.sv': ('wiki.sv.npz',\n 'b9ec52e9423688f195f3145c243226c0e0b51e83'),\n 'wiki.sw': ('wiki.sw.npz',\n '5262f0c645322b10eca73f792a970f10b2719e55'),\n 'wiki.szl': ('wiki.szl.npz',\n 'fdd6d6b291cdbbcec5ff93451a588fdd103bb2d0'),\n 'wiki.ta': ('wiki.ta.npz',\n 'da7c5bc6e1142306ff2669bf1739832beb6c1763'),\n 'wiki.tcy': ('wiki.tcy.npz',\n 'baa49e1afa2bb0dcaaef0fac1ee75bbe711d1134'),\n 'wiki.te': ('wiki.te.npz',\n 'baf48767ce85e4d41d65d25f2bbf1c5f559ec18f'),\n 'wiki.tet': ('wiki.tet.npz',\n '11e46a893af55344dbe102d530fdfea5d949d3bc'),\n 'wiki.tg': ('wiki.tg.npz',\n 'da66abb72ec9ccc602713161e544963d59cc51d7'),\n 
'wiki.th': ('wiki.th.npz',\n '25e54bf2d305779ec9baa5f344410bd75c7702fc'),\n 'wiki.ti': ('wiki.ti.npz',\n '1faf98f3a0eafa7559a4b2a111f43dd1f7b9a05b'),\n 'wiki.tk': ('wiki.tk.npz',\n '34c714fa8275fd6abfe86b2d144a043774552a6c'),\n 'wiki.tl': ('wiki.tl.npz',\n '7d7f8a0485155bce7a74a1d778824375b0029f53'),\n 'wiki.tn': ('wiki.tn.npz',\n 'd0bc3a9b948753ac2283e5e10480c9fa0f6acb53'),\n 'wiki.to': ('wiki.to.npz',\n 'e982fc31bcfcf7339988d7aad21ce29ac9e84b0b'),\n 'wiki.tpi': ('wiki.tpi.npz',\n '448cef043fa4b7f97825dbf8ee205ef05543bcac'),\n 'wiki.tr': ('wiki.tr.npz',\n 'c9830607a4c5134c6191006f1d80bae0ec798fe6'),\n 'wiki.ts': ('wiki.ts.npz',\n '84a0598803712c8a713943447ddb73fc0f39af43'),\n 'wiki.tt': ('wiki.tt.npz',\n '82c29df18f33e6284af3e977a6dda7e132a7a225'),\n 'wiki.tum': ('wiki.tum.npz',\n '358990b894a3fb09d70674465952d828c9b0eda7'),\n 'wiki.tw': ('wiki.tw.npz',\n '1e6d2838a4f271c1808795fb929cfcbf95094d93'),\n 'wiki.ty': ('wiki.ty.npz',\n 'e41ca5192d8cb515b3561c8d6935b150deb027b7'),\n 'wiki.tyv': ('wiki.tyv.npz',\n 'ce062ed32e854604714b65698ae290c99ba28060'),\n 'wiki.udm': ('wiki.udm.npz',\n '9e1c5891ee0c5ac8f65fc457e1b42c7b2bfc8d37'),\n 'wiki.ug': ('wiki.ug.npz',\n '656503e54063e200980e39f00fc011395bcd8551'),\n 'wiki.uk': ('wiki.uk.npz',\n '352b7ee24d9fc6513fff4fe13bc04086c680834a'),\n 'wiki.ur': ('wiki.ur.npz',\n 'a81e55c7adfc2cef779ce9a01fe21319a7e4943b'),\n 'wiki.uz': ('wiki.uz.npz',\n 'd60d1e67bb8574dd71c18c88114aba674fc1eecb'),\n 'wiki.ve': ('wiki.ve.npz',\n '5bfc3dbb3e47d23597df47ef12bd1c64ab8d3ea9'),\n 'wiki.vep': ('wiki.vep.npz',\n '7a94355754fbe56802242c0bf9d7a27335095552'),\n 'wiki.vi': ('wiki.vi.npz',\n 'f118039eb16a4ca3347b6b171eac41113350a041'),\n 'wiki.vls': ('wiki.vls.npz',\n '9a46a2fdc6448aa54f212081643745499ea7d05c'),\n 'wiki.vo': ('wiki.vo.npz',\n '8e2f93c85ac608bcc4ae14093b9ff016061378fb'),\n 'wiki.wa': ('wiki.wa.npz',\n '907074f7743d30cdbb2c48d0c8b4040796ea4164'),\n 'wiki.war': ('wiki.war.npz',\n '928fb410c394b9c18d875326b6a3e750e2611e1b'),\n 'wiki.wo': ('wiki.wo.npz',\n '7bb352be44f7261aa926f49b13e77df30f29312f'),\n 'wiki.wuu': ('wiki.wuu.npz',\n '0d1dc7b05867ff2156a1180ad3da3b4697924e59'),\n 'wiki.xal': ('wiki.xal.npz',\n 'd87f4a131e086dc0bdc2a7e10406820c3c03b6a9'),\n 'wiki.xh': ('wiki.xh.npz',\n 'c64e1d2e77d1c744a628e2bd7353284616e48bea'),\n 'wiki.xmf': ('wiki.xmf.npz',\n '160b9ee9773b9099aaf37ae9bdbc8a4a93b7f6ea'),\n 'wiki.yi': ('wiki.yi.npz',\n '0662542cee29f3392fc905004ac6443b32c1477c'),\n 'wiki.yo': ('wiki.yo.npz',\n '5d12d3b902a1fa19d8548295c3802c0608afa5c8'),\n 'wiki.za': ('wiki.za.npz',\n '536348ff89df62e968739b567a1245bfd4112fbe'),\n 'wiki.zea': ('wiki.zea.npz',\n '61fa192289a7c0f73ffa8035632a38b91c31c224'),\n 'wiki.zh_classical': ('wiki.zh_classical.npz',\n '9acc9eaf8ebe316b945fb1f56ac71a2b7e024854'),\n 'wiki.zh_min_nan': ('wiki.zh_min_nan.npz',\n '5d38bc025c82af578299d60f7df7b399de6ed81a'),\n 'wiki.zh': ('wiki.zh.npz',\n '94007fcf3b105bf2c21b84a3a22bdb7946e74804'),\n 'wiki.zh_yue': ('wiki.zh_yue.npz',\n 'af6f0d94e6418d528d6cedd859e07e6e2fb416ab'),\n 'wiki.zu': ('wiki.zu.npz',\n 'fc9ce07d5d0c49a3c86cf1b26056ada58f9404ca')}\n\nGOOGLEANALOGY_CATEGORIES = [\n 'capital-common-countries', 'capital-world', 'currency', 'city-in-state',\n 'family', 'gram1-adjective-to-adverb', 'gram2-opposite',\n 'gram3-comparative', 'gram4-superlative', 'gram5-present-participle',\n 'gram6-nationality-adjective', 'gram7-past-tense', 'gram8-plural',\n 'gram9-plural-verbs'\n]\n\nBATS_CHECKSUMS = \\\n {'BATS_3.0/1_Inflectional_morphology/I01 [noun - plural_reg].txt':\n 
'cfcba2835edf81abf11b84defd2f4daa3ca0b0bf',\n 'BATS_3.0/1_Inflectional_morphology/I02 [noun - plural_irreg].txt':\n '44dbc56432b79ff5ce2ef80b6840a8aa916524f9',\n 'BATS_3.0/1_Inflectional_morphology/I03 [adj - comparative].txt':\n 'dc530918e98b467b8102a7dab772a66d3db32a73',\n 'BATS_3.0/1_Inflectional_morphology/I04 [adj - superlative].txt':\n '6c6fdfb6c733bc9b298d95013765163f42faf6fb',\n 'BATS_3.0/1_Inflectional_morphology/I05 [verb_inf - 3pSg].txt':\n '39fa47ec7238ddb3f9818bc586f23f55b55418d8',\n 'BATS_3.0/1_Inflectional_morphology/I06 [verb_inf - Ving].txt':\n '8fabeb9f5af6c3e7154a220b7034bbe5b900c36f',\n 'BATS_3.0/1_Inflectional_morphology/I07 [verb_inf - Ved].txt':\n 'aa04df95aa2edb436cbcc03c7b15bc492ece52d6',\n 'BATS_3.0/1_Inflectional_morphology/I08 [verb_Ving - 3pSg].txt':\n '5f22d8121a5043ce76d3b6b53a49a7bb3fe33920',\n 'BATS_3.0/1_Inflectional_morphology/I09 [verb_Ving - Ved].txt':\n '377777c1e793c638e72c010228156d01f916708e',\n 'BATS_3.0/1_Inflectional_morphology/I10 [verb_3pSg - Ved].txt':\n '051c0c3c633e10900f827991dac14cf76da7f022',\n 'BATS_3.0/2_Derivational_morphology/D01 [noun+less_reg].txt':\n '5d6839e9d34ee1e9fddb5bbf6516cf6420b85d8d',\n 'BATS_3.0/2_Derivational_morphology/D02 [un+adj_reg].txt':\n '80b82227a0d5f7377f1e8cebe28c582bfeb1afb5',\n 'BATS_3.0/2_Derivational_morphology/D03 [adj+ly_reg].txt':\n '223e120bd61b3116298a253f392654c15ad5a39a',\n 'BATS_3.0/2_Derivational_morphology/D04 [over+adj_reg].txt':\n 'a56f8685af489bcd09c36f864eba1657ce0a7c28',\n 'BATS_3.0/2_Derivational_morphology/D05 [adj+ness_reg].txt':\n '5da99b1f1781ecfb4a1a7448c715abf07451917b',\n 'BATS_3.0/2_Derivational_morphology/D06 [re+verb_reg].txt':\n '4c5e1796091fade503fbf0bfc2fae2c7f98b5dd2',\n 'BATS_3.0/2_Derivational_morphology/D07 [verb+able_reg].txt':\n 'a6218162bc257d98e875fc667c23edfac59e19fd',\n 'BATS_3.0/2_Derivational_morphology/D08 [verb+er_irreg].txt':\n '9a4236c3bbc23903e101a42fb5ad6e15e552fadf',\n 'BATS_3.0/2_Derivational_morphology/D09 [verb+tion_irreg].txt':\n '3ab0153926d5cf890cf08a4077da6d9946133874',\n 'BATS_3.0/2_Derivational_morphology/D10 [verb+ment_irreg].txt':\n '2a012b87a9a60e128e064c5fe24b60f99e16ddce',\n 'BATS_3.0/3_Encyclopedic_semantics/E01 [country - capital].txt':\n '9890315d3c4e6a38b8ae5fc441858564be3d3dc4',\n 'BATS_3.0/3_Encyclopedic_semantics/E02 [country - language].txt':\n 'ef08a00e8ff7802811ace8f00fabac41b5d03678',\n 'BATS_3.0/3_Encyclopedic_semantics/E03 [UK_city - county].txt':\n '754957101c93a25b438785bd4458404cd9010259',\n 'BATS_3.0/3_Encyclopedic_semantics/E04 [name - nationality].txt':\n '71a6562c34fb6154992a7c3e499375fcc3529c96',\n 'BATS_3.0/3_Encyclopedic_semantics/E05 [name - occupation].txt':\n 'a9a6f9f1af959aef83106f3dbd6bed16dfe9a3ea',\n 'BATS_3.0/3_Encyclopedic_semantics/E06 [animal - young].txt':\n '12d5b51c7b76b9136eadc719abc8cf4806c67b73',\n 'BATS_3.0/3_Encyclopedic_semantics/E07 [animal - sound].txt':\n '91991b007a35f45bd42bd7d0d465c6f8311df911',\n 'BATS_3.0/3_Encyclopedic_semantics/E08 [animal - shelter].txt':\n 'e5af11e216db392986ba0cbb597d861066c29adb',\n 'BATS_3.0/3_Encyclopedic_semantics/E09 [things - color].txt':\n 'd30b2eb2fc7a60f19afda7c54582e30f6fe28f51',\n 'BATS_3.0/3_Encyclopedic_semantics/E10 [male - female].txt':\n '247a588671bc1da8f615e14076bd42573d24b4b3',\n 'BATS_3.0/4_Lexicographic_semantics/L01 [hypernyms - animals].txt':\n '4b5c4dabe2c9c038fafee85d8d3958f1b1dec987',\n 'BATS_3.0/4_Lexicographic_semantics/L02 [hypernyms - misc].txt':\n '83d5ecad78d9de28fd70347731c7ee5918ba43c9',\n 'BATS_3.0/4_Lexicographic_semantics/L03 [hyponyms - 
misc].txt':\n 'a8319856ae2f76b4d4c030ac7e899bb3a06a9a48',\n 'BATS_3.0/4_Lexicographic_semantics/L04 [meronyms - substance].txt':\n 'c081e1104e1b40725063f4b39d13d1ec12496bfd',\n 'BATS_3.0/4_Lexicographic_semantics/L05 [meronyms - member].txt':\n 'bcbf05f3be76cef990a74674a9999a0bb9790a07',\n 'BATS_3.0/4_Lexicographic_semantics/L06 [meronyms - part].txt':\n '2f9bdcc74b881e1c54b391c9a6e7ea6243b3accc',\n 'BATS_3.0/4_Lexicographic_semantics/L07 [synonyms - intensity].txt':\n '8fa287860b096bef004fe0f6557e4f686e3da81a',\n 'BATS_3.0/4_Lexicographic_semantics/L08 [synonyms - exact].txt':\n 'a17c591961bddefd97ae5df71f9d1559ce7900f4',\n 'BATS_3.0/4_Lexicographic_semantics/L09 [antonyms - gradable].txt':\n '117fbb86504c192b33a5469f2f282e741d9c016d',\n 'BATS_3.0/4_Lexicographic_semantics/L10 [antonyms - binary].txt':\n '3cde2f2c2a0606777b8d7d11d099f316416a7224'}\n\nBATS_CATEGORIES = {\n 'I01': '[noun - plural_reg]',\n 'I02': '[noun - plural_irreg]',\n 'I03': '[adj - comparative]',\n 'I04': '[adj - superlative]',\n 'I05': '[verb_inf - 3pSg]',\n 'I06': '[verb_inf - Ving]',\n 'I07': '[verb_inf - Ved]',\n 'I08': '[verb_Ving - 3pSg]',\n 'I09': '[verb_Ving - Ved]',\n 'I10': '[verb_3pSg - Ved]',\n 'D01': '[noun+less_reg]',\n 'D02': '[un+adj_reg]',\n 'D03': '[adj+ly_reg]',\n 'D04': '[over+adj_reg]',\n 'D05': '[adj+ness_reg]',\n 'D06': '[re+verb_reg]',\n 'D07': '[verb+able_reg]',\n 'D08': '[verb+er_irreg]',\n 'D09': '[verb+tion_irreg]',\n 'D10': '[verb+ment_irreg]',\n 'E01': '[country - capital]',\n 'E02': '[country - language]',\n 'E03': '[UK_city - county]',\n 'E04': '[name - nationality]',\n 'E05': '[name - occupation]',\n 'E06': '[animal - young]',\n 'E07': '[animal - sound]',\n 'E08': '[animal - shelter]',\n 'E09': '[things - color]',\n 'E10': '[male - female]',\n 'L01': '[hypernyms - animals]',\n 'L02': '[hypernyms - misc]',\n 'L03': '[hyponyms - misc]',\n 'L04': '[meronyms - substance]',\n 'L05': '[meronyms - member]',\n 'L06': '[meronyms - part]',\n 'L07': '[synonyms - intensity]',\n 'L08': '[synonyms - exact]',\n 'L09': '[antonyms - gradable]',\n 'L10': '[antonyms - binary]'\n}\n\nSEMEVAL17_CHECKSUMS = \\\n {'SemEval17-Task2/README.txt':\n 'ad02d4c22fff8a39c9e89a92ba449ec78750af6b',\n 'SemEval17-Task2/task2-scorer.jar':\n '145ef73ce955656d59e3b67b41f8152e8ee018d8',\n 'SemEval17-Task2/test/subtask1-monolingual/data/de.test.data.txt':\n '6fc840f989d2274509549e472a68fb88dd2e149f',\n 'SemEval17-Task2/test/subtask1-monolingual/data/en.test.data.txt':\n '05293fcbd80b2f4aad9b6518ce1a546ad8f61f33',\n 'SemEval17-Task2/test/subtask1-monolingual/data/es.test.data.txt':\n '552904b5988f9951311290ca8fa0441dd4351d4b',\n 'SemEval17-Task2/test/subtask1-monolingual/data/fa.test.data.txt':\n '29d5970feac5982961bd6ab621ba31f83d3bff77',\n 'SemEval17-Task2/test/subtask1-monolingual/data/it.test.data.txt':\n 'c95fe2be8fab37e9c70610117bdedc48a0a8e95c',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/de.test.gold.txt':\n 'c51463460495a242cc726d41713c5e00b66fdd18',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/en.test.gold.txt':\n '2d2bb2ed41308cc60e7953cc9036f7dc89141b48',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/es.test.gold.txt':\n 'a5842ff17fe3847d15414924826a8eb236018bcc',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/fa.test.gold.txt':\n '717bbe035d8ae2bad59416eb3dd4feb7238b97d4',\n 'SemEval17-Task2/test/subtask1-monolingual/keys/it.test.gold.txt':\n 'a342b950109c73afdc86a7829e17c1d8f7c482f0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-es.test.data.txt':\n 
'ef92b1375762f68c700e050d214d3241ccde2319',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-fa.test.data.txt':\n '17aa103981f3193960309bb9b4cc151acaf8136c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/de-it.test.data.txt':\n 'eced15e8565689dd67605a82a782d19ee846222a',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-de.test.data.txt':\n '5cb69370a46385a7a3d37cdf2018744be77203a0',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-es.test.data.txt':\n '402f7fed52b60e915fb1be49f935395488cf7a7b',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-fa.test.data.txt':\n '9bdddbbde3da755f2a700bddfc3ed1cd9324ad48',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/en-it.test.data.txt':\n 'd3b37aac79ca10311352309ef9b172f686ecbb80',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-fa.test.data.txt':\n 'a2959aec346c26475a4a6ad4d950ee0545f2381e',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/es-it.test.data.txt':\n 'ca627c30143d9f82a37a8776fabf2cee226dd35c',\n 'SemEval17-Task2/test/subtask2-crosslingual/data/it-fa.test.data.txt':\n 'a03d79a6ce7b798356b53b4e85dbe828247b97ef',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-es.test.gold.txt':\n '7564130011d38daad582b83135010a2a58796df6',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-fa.test.gold.txt':\n 'c9e23c2e5e970e7f95550fbac3362d85b82cc569',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/de-it.test.gold.txt':\n 'b74cc2609b2bd2ceb5e076f504882a2e0a996a3c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-de.test.gold.txt':\n '428dfdad2a144642c13c24b845e6b7de6bf5f663',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-es.test.gold.txt':\n '1dd7ab08a10552486299151cdd32ed19b56db682',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-fa.test.gold.txt':\n '17451ac2165aa9b695dae9b1aba20eb8609fb400',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/en-it.test.gold.txt':\n '5041c0b84a603ed85aa0a5cbe4b1c34f69a2fa7c',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-fa.test.gold.txt':\n '8c09a219670dc32ab3864078bf0c28a287accabc',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/es-it.test.gold.txt':\n 'b1cdd13209354cc2fc2f4226c80aaa85558daf4a',\n 'SemEval17-Task2/test/subtask2-crosslingual/keys/it-fa.test.gold.txt':\n 'e0b560bb1d2db39ce45e841c8aad611734dc94f1',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/de.trial.data.txt':\n 'dd071fd90f59bec8d271a447d86ee2e462941f52',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/en.trial.data.txt':\n 'e8e5add0850b3dec07f102be26b8791a5e9bbbcf',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/es.trial.data.txt':\n '8956c78ff9ceae1d923a57816e55392c6a7dfc49',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/fa.trial.data.txt':\n '2f7c4247cde0d918b3508e90f6b49a1f5031c81b',\n 'SemEval17-Task2/trial/subtask1-monolingual/data/it.trial.data.txt':\n 'c11e0b5b55f94fc97c7b11fa455e71b071be879f',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/de.trial.gold.txt':\n 'ce5567b1accf3eb07da53229dfcb2a8a1dfac380',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/en.trial.gold.txt':\n '693cb5928e807c79e39136dc0981dadca7832ae6',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/es.trial.gold.txt':\n '8241ca66bf5ba55f77607e9bcfae8e34902715d8',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/fa.trial.gold.txt':\n 'd30701a93c8c5500b82ac2334ed8410f9a23864b',\n 'SemEval17-Task2/trial/subtask1-monolingual/keys/it.trial.gold.txt':\n 'bad225573e1216ba8b35429e9fa520a20e8ce031',\n 
'SemEval17-Task2/trial/subtask1-monolingual/output/de.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/en.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/es.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/fa.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask1-monolingual/output/it.trial.sample.output.txt':\n 'f85cba9f6690d61736623c16e620826b09384aa5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-es.trial.data.txt':\n 'c27c8977d8d4434fdc3e59a7b0121d87e0a03237',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-fa.trial.data.txt':\n '88a6f6dd1bba309f7cae7281405e37f442782983',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/de-it.trial.data.txt':\n 'ebdab0859f3b349fa0120fc8ab98be3394f0d73d',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-de.trial.data.txt':\n '128d1a460fe9836b66f0fcdf59455b02edb9f258',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-es.trial.data.txt':\n '508c5dde8ffcc32ee3009a0d020c7c96a338e1d1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-fa.trial.data.txt':\n '1a3640eb5facfe15b1e23a07183a2e62ed80c7d9',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/en-it.trial.data.txt':\n '141c83d591b0292016583d9c23a2cc5514a006aa',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-fa.trial.data.txt':\n 'a0a548cd698c389ee80c34d6ec72abed5f1625e5',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/es-it.trial.data.txt':\n '8d42bed8a43ff93d26ca95794758d9392ca707ed',\n 'SemEval17-Task2/trial/subtask2-crosslingual/data/it-fa.trial.data.txt':\n '9c85223f1f734de61c28157df0ce417bb0537803',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-es.trial.gold.txt':\n '126c92b2fb3b8f2784dd4ae2a4c52b02a87a8196',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-fa.trial.gold.txt':\n '1db6201c2c8f19744c39dbde8bd4a803859d64c1',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/de-it.trial.gold.txt':\n '5300bf2ead163ff3981fb41ec5d0e291c287c9e0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-de.trial.gold.txt':\n 'd4f5205de929bb0c4020e1502a3f2204b5accd51',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-es.trial.gold.txt':\n '3237e11c3a0d9c0f5d583f8dc1d025b97a1f8bfe',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-fa.trial.gold.txt':\n 'c14de7bf326907336a02d499c9b92ab229f3f4f8',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/en-it.trial.gold.txt':\n '3c0276c4b4e7a6d8a618bbe1ab0f30ad7b07929c',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-fa.trial.gold.txt':\n '359f69e9dfd6411a936baa3392b8f05c398a7707',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/es-it.trial.gold.txt':\n '44090607fabe5a26926a384e521ef1317f6f00d0',\n 'SemEval17-Task2/trial/subtask2-crosslingual/keys/it-fa.trial.gold.txt':\n '97b09ffa11803023c2143fd4a4ac4bbc9775e645',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-es.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-fa.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/de-it.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-de.trial.sample.output.txt':\n 
'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-es.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-fa.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/en-it.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-fa.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/es-it.trial.sample.output.txt':\n 'b71166d8615e921ee689cefc81419398d341167f',\n 'SemEval17-Task2/trial/subtask2-crosslingual/output/it-fa.trial.sample.output.txt':\n 'a0735361a692be357963959728dacef85ea08240'}\n\nUD21_DATA_FILE_SHA1 = \\\n {'af': {'dev': ('af-ud-dev.conllu',\n 'e37b104f4425ee00afc81779201816d5ac525194'),\n 'test': ('af-ud-test.conllu',\n 'd2bf02370d308ee957c04242bd0871db0e488389'),\n 'train': ('af-ud-train.conllu',\n 'a652c7b19c236063d3ea489947f83095893b699a')},\n 'grc_proiel': {'dev': ('grc_proiel-ud-dev.conllu',\n 'd199530c7e40ff0214e510957bb126af0dc12c1c'),\n 'test': ('grc_proiel-ud-test.conllu',\n 'bb7825ddeb18fc2d86638e4725f04563f3e08aab'),\n 'train': ('grc_proiel-ud-train.conllu',\n 'fe6c861299b033abe8c4ce2b6131cd74f87b96a7')},\n 'grc': {'dev': ('grc-ud-dev.conllu',\n 'debdfec0272cd558ccd29fe0ae2f13175dd20a33'),\n 'test': ('grc-ud-test.conllu',\n 'f19accf31db95e2c736d716d3438c09aa877eb07'),\n 'train': ('grc-ud-train.conllu',\n 'e98d3eabea67787c5d43a498f5a0fa4246f38104')},\n 'ar_nyuad': {'dev': ('ar_nyuad-ud-dev.conllu',\n 'b740de9bd68e68b30b9b313eb050d44e94470ca5'),\n 'test': ('ar_nyuad-ud-test.conllu',\n 'f5d5b8979b7fedd76235d4bae77e0b4a7b0a750a'),\n 'train': ('ar_nyuad-ud-train.conllu',\n 'd065f03958fd8782a7431b6778c6665ad09444a6')},\n 'ar_pud': {'test': ('ar_pud-ud-test.conllu',\n '2161701e6726b6feb14733a312fba6160b9eb722')},\n 'ar': {'dev': ('ar-ud-dev.conllu',\n '5f8964974d5ba5eb3504cdafb93c34c473c4177c'),\n 'test': ('ar-ud-test.conllu',\n '58df161047f310cc3bb4d0e615ca33466e630bb9'),\n 'train': ('ar-ud-train.conllu',\n '0a3d5cefa1fecd6a74f2016ee73ea7a7a02eb359')},\n 'eu': {'dev': ('eu-ud-dev.conllu',\n '3ee15b5ed46ec93d7278c8cc0351d242417d553d'),\n 'test': ('eu-ud-test.conllu',\n 'aa68d6442ac6dc1abedc19c1b98c4a9944786188'),\n 'train': ('eu-ud-train.conllu',\n 'd56ec997916e38ee6ab1badd78c119e81e4797c9')},\n 'be': {'dev': ('be-ud-dev.conllu',\n '015473e91cf8937c46e8b721f206415abac16a35'),\n 'test': ('be-ud-test.conllu',\n 'f009ea1885f54cfd77fca8a2c89133b2af8f9f5e'),\n 'train': ('be-ud-train.conllu',\n '26b871e28d2f356a709f106b6e3e86b417ba74e7')},\n 'bg': {'dev': ('bg-ud-dev.conllu',\n '0a2284b10547681eb65691eb2a9f0f1662e16e90'),\n 'test': ('bg-ud-test.conllu',\n '75ea2a5e1d55bb57efecae6ec2b5ac3cc1b37e57'),\n 'train': ('bg-ud-train.conllu',\n 'd4b2fa267010c4486885c91f3af65ff66c8be94c')},\n 'bxr': {'sample': ('bxr-ud-sample.conllu',\n '9239bdd251a60820c71111ec54de9e7d58a8579d'),\n 'test': ('bxr-ud-test.conllu',\n '0a06e527454ae0b547153222f67eb5db94e528fd')},\n 'yue': {'test': ('yue-ud-test.conllu',\n 'd91477c65aa75cd45489cca13f7a122066972bdb')},\n 'ca': {'dev': ('ca-ud-dev.conllu',\n '5737824f0afff0d07a43db331f102d62c6da2d96'),\n 'test': ('ca-ud-test.conllu',\n '0e28bd2a3b982515c1158194ad52bcbbe741e170'),\n 'train': ('ca-ud-train.conllu',\n 'b5ff2392722d4a1df3bfc52fa5b8f2043b7aec0c')},\n 'zh_cfl': {'test': ('zh_cfl-ud-test.conllu',\n 
'32fe45cd0e4e11ced95202971bce74acbc6a8c30')},\n 'zh_hk': {'test': ('zh_hk-ud-test.conllu',\n '4c75fa5bbcdcb181447b4e037224d50feb2776fb')},\n 'zh_pud': {'test': ('zh_pud-ud-test.conllu',\n 'b3e448884b7b6229379f9723b97c6e9a6fedcb61')},\n 'zh': {'dev': ('zh-ud-dev.conllu',\n '34d8253b35ad2245d59ddffa71b5689ef267b6b2'),\n 'test': ('zh-ud-test.conllu',\n '0f00516097650c12262298dd0fbd1b17a6d2bfe2'),\n 'train': ('zh-ud-train.conllu',\n '9444eec5f4561f289ad140e47e49013689512a65')},\n 'cop': {'dev': ('cop-ud-dev.conllu',\n '863d1004df1a92df52515105f6fae6ff68539595'),\n 'test': ('cop-ud-test.conllu',\n 'd3b33566679f071d4ad622ad840cd98381835706'),\n 'train': ('cop-ud-train.conllu',\n '33d0e5de5d6077f7c52a4cd90bce0047f3e9ff6f')},\n 'hr': {'dev': ('hr-ud-dev.conllu',\n '8da2a419980807d2e91e09b6bf496e58d442b0ba'),\n 'test': ('hr-ud-test.conllu',\n '49d673cba3d32d39d413e557276a45a0214ed83e'),\n 'train': ('hr-ud-train.conllu',\n 'e5cc686bb46c80c84c3ac60ed459e1f124c04c08')},\n 'cs_cac': {'dev': ('cs_cac-ud-dev.conllu',\n '69dfed28c29146b41a3428f4715bde70a6aecf00'),\n 'test': ('cs_cac-ud-test.conllu',\n 'a994b33ebbde486c1818a9df460fb112055e95de'),\n 'train': ('cs_cac-ud-train.conllu',\n '694f8559471dc481612606bf5df078daa094a84e')},\n 'cs_cltt': {'dev': ('cs_cltt-ud-dev.conllu',\n 'f35d5dbe57cd95760901ea29de4f493d5d2a44d4'),\n 'test': ('cs_cltt-ud-test.conllu',\n 'a8f6696785e658471f759bc736b738a105cba9a3'),\n 'train': ('cs_cltt-ud-train.conllu',\n 'ab97886066bfa462e5da03d25f802489292c0b56')},\n 'cs_fictree': {'dev': ('cs_fictree-ud-dev.conllu',\n 'dc67c07737a3a8bf2633068941f2d55f1500e192'),\n 'test': ('cs_fictree-ud-test.conllu',\n '06becaedef1cfdb8e1b2dce3f0d3a3a607d178a4'),\n 'train': ('cs_fictree-ud-train.conllu',\n 'fe7dbe3a0e6ee73e19e788c43bbb8f8f47ae1645')},\n 'cs_pud': {'test': ('cs_pud-ud-test.conllu',\n '9f205677041de694157ba2ef3e1eadb44d467f2f')},\n 'cs': {'dev': ('cs-ud-dev.conllu',\n 'd609e895b21b8710337e23a98b58ffd7b7a54bf1'),\n 'test': ('cs-ud-test.conllu',\n '34091286a11b1ce2a9c8bcfa03fdd86fb0e13965'),\n 'train': ('cs-ud-train.conllu',\n 'd1f855798a29d433b580d01ade0d8d062cd58534')},\n 'da': {'dev': ('da-ud-dev.conllu',\n '2c0c798c20a2efb30273172d388342a82bb0ce3c'),\n 'test': ('da-ud-test.conllu',\n '85a95a8527f8773f1575ceaf0ab51f204b211047'),\n 'train': ('da-ud-train.conllu',\n 'b653c029a7ae5c106f865dcef949fb3fe2aa0420')},\n 'nl_lassysmall': {'dev': ('nl_lassysmall-ud-dev.conllu',\n '2a169af74c2206c9073c3932b4a300492a314ee5'),\n 'test': ('nl_lassysmall-ud-test.conllu',\n '39f08896a40ad370f2acc37d58689cdc43a660a9'),\n 'train': ('nl_lassysmall-ud-train.conllu',\n 'e4fd6bac246c81bb17a3c932e251b8662739cc19')},\n 'nl': {'dev': ('nl-ud-dev.conllu',\n '33a9387eef9f5c0b15bd1e76e78776863f1f6d90'),\n 'test': ('nl-ud-test.conllu',\n '01b3e1048792c851fdd59882c353fcdb76dc165e'),\n 'train': ('nl-ud-train.conllu',\n '8e6a10152b7d09ce61433dd5f715ab2401611cf6')},\n 'en_lines': {'dev': ('en_lines-ud-dev.conllu',\n '83b63b7670ea4394b558bc26e16a004339f0a0ef'),\n 'test': ('en_lines-ud-test.conllu',\n 'ccc9d3c71a873313d138c3adb12405a97eb270d8'),\n 'train': ('en_lines-ud-train.conllu',\n 'da42bfac9fd97d98ebbbc37c65d83ff4c53b4e79')},\n 'en_pud': {'test': ('en_pud-ud-test.conllu',\n '4a9c83ba058a7e51979af790ba0440cc274b948f')},\n 'en_partut': {'dev': ('en_partut-ud-dev.conllu',\n '863a6f571158acaaca95223e50bd08fc0c1134f0'),\n 'test': ('en_partut-ud-test.conllu',\n '0c0780b0f14e4623f1014e6496d639cd2d2f6ffd'),\n 'train': ('en_partut-ud-train.conllu',\n 'e00a2d6f7efa28c8aaa40dccdf29b59a50f48e18')},\n 'en': {'dev': 
('en-ud-dev.conllu',\n 'e2159dda4400d289ad8a403b466c8d23d733ba35'),\n 'test': ('en-ud-test.conllu',\n 'bd36ef23f76155625b379d063427bd62f19b7658'),\n 'train': ('en-ud-train.conllu',\n '993c44f62104971fe2d056847349facbb7986258')},\n 'et': {'dev': ('et-ud-dev.conllu',\n '312f9477f7ee1dd380c1fbcf77a6f0c63476fdbb'),\n 'test': ('et-ud-test.conllu',\n 'd70907f0771b41a27406672b9d91043a0954f946'),\n 'train': ('et-ud-train.conllu',\n 'b6d788e7a3362d0984d1cff06c1ba3d66f6bf773')},\n 'fi_ftb': {'dev': ('fi_ftb-ud-dev.conllu',\n '552ec574acdb3209e7545af4e16a43a1e2956979'),\n 'test': ('fi_ftb-ud-test.conllu',\n '13c34838a0fa9e379f9624ed1f4c368ca50a7d98'),\n 'train': ('fi_ftb-ud-train.conllu',\n '73d025250bfc82a24181b5ed601dc4ae7c8e846c')},\n 'fi_pud': {'test': ('fi_pud-ud-test.conllu',\n '4ab7b0d99ce6697d79732e401be97585a28c2afa')},\n 'fi': {'dev': ('fi-ud-dev.conllu',\n 'e023cf7eaffbda20bd4518d87fe9086207bb5361'),\n 'test': ('fi-ud-test.conllu',\n 'fd57c5106e43994250f4472890572bdbb8b4a48b'),\n 'train': ('fi-ud-train.conllu',\n 'ab27bda8cbb62886196b78de87985a4c6cf8215d')},\n 'fr_ftb': {'dev': ('fr_ftb-ud-dev.conllu',\n '71b3cc02601f64711f98e33a6b2af10aa00700be'),\n 'test': ('fr_ftb-ud-test.conllu',\n '723b8c44e74202a18b7e71268b738a5e1aa15f86'),\n 'train': ('fr_ftb-ud-train.conllu',\n '9a347120478254647deb7c7e02871b28aad23ec4')},\n 'fr_pud': {'test': ('fr_pud-ud-test.conllu',\n '570b7e31dc359ed62123bea6546efa13cfc2cf25')},\n 'fr_partut': {'dev': ('fr_partut-ud-dev.conllu',\n '1505030048829a8dccc466cc86bca057996301ae'),\n 'test': ('fr_partut-ud-test.conllu',\n 'f6446317c9f82cc0b70a76be75282804a3359ac0'),\n 'train': ('fr_partut-ud-train.conllu',\n 'f87c246cfa91186b90c7780cb64783034f196622')},\n 'fr_sequoia': {'dev': ('fr_sequoia-ud-dev.conllu',\n '859b10d80c7b3a382571cce9b2620039673539d1'),\n 'test': ('fr_sequoia-ud-test.conllu',\n 'be0ef69e392e64030414748da2995433f23e033d'),\n 'train': ('fr_sequoia-ud-train.conllu',\n '48ac01913518888a32670a687123ed1bac57e0e9')},\n 'fr': {'dev': ('fr-ud-dev.conllu',\n '5de0aee778bcc69d14285ada88f0ff7e5ac0a0cd'),\n 'test': ('fr-ud-test.conllu',\n 'd20a014acd38193155a33a5233c13f89541c78c3'),\n 'train': ('fr-ud-train.conllu',\n 'feee0cc85a2d7dcb3397399ef22c8af8ef75420b')},\n 'gl_treegal': {'dev': ('gl_treegal-ud-dev.conllu',\n '272558614cff4a5e1f2805626904e6dc488b8d25'),\n 'test': ('gl_treegal-ud-test.conllu',\n '18d99474d3aa9c83878c42a79d7881330dd9b861'),\n 'train': ('gl_treegal-ud-train.conllu',\n 'b1691dd5f587a19eb9dc6f141ecbd3eec3bb0e07')},\n 'gl': {'dev': ('gl-ud-dev.conllu',\n 'e72390dce9bf973442deef31ed0cd7a975361fe5'),\n 'test': ('gl-ud-test.conllu',\n '7d82ba3672bd4427674428e1dcbcae4feebc3aeb'),\n 'train': ('gl-ud-train.conllu',\n 'd586e7bffa314f8c5b85288e060e68dddc1f5d33')},\n 'de_pud': {'test': ('de_pud-ud-test.conllu',\n '2c91e42b7345145290b68385ff5270910048b8c4')},\n 'de': {'dev': ('de-ud-dev.conllu',\n '9b4f49bfa2b609d54369890d9e7d8d24a3c229af'),\n 'test': ('de-ud-test.conllu',\n '48f0f6f98b38710906481b5e9fe1d459d28f1b4a'),\n 'train': ('de-ud-train.conllu',\n '04a1d6a6a2da9d9c38496118e0432c9a6720db64')},\n 'got': {'dev': ('got-ud-dev.conllu',\n '501c47193ca2af5826e4afcc04941df87a7c47c3'),\n 'test': ('got-ud-test.conllu',\n 'cfcf16d562434987562bd1f5faa0d8c007e9ddb8'),\n 'train': ('got-ud-train.conllu',\n 'b4951ede89d947c6617df782ac248566235f78fb')},\n 'el': {'dev': ('el-ud-dev.conllu',\n '9df0919ed6f9dcab3ba3f60f0ad31d0c79ae6cdb'),\n 'test': ('el-ud-test.conllu',\n '1bb4a6b24521f0c3c7d6cf71e2456ef3a1ee31aa'),\n 'train': ('el-ud-train.conllu',\n 
'32f4abc821624c4cd4d3b3b555c1558f06366e2c')},\n 'he': {'dev': ('he-ud-dev.conllu',\n 'c5b76874fcf11c7733e1555957bb49e8298af140'),\n 'test': ('he-ud-test.conllu',\n '4fbe4115948250fc2e42dd43399d1c6c11ddcfd2'),\n 'train': ('he-ud-train.conllu',\n 'eae49a515b38d224b109138bf006a112e80a7caf')},\n 'hi_pud': {'test': ('hi_pud-ud-test.conllu',\n 'd237fecc594186e7a52ad33313ac52e927905d73')},\n 'hi': {'dev': ('hi-ud-dev.conllu',\n '48b592bb1aa1cbc30d41d2913421cfd3f9d2c790'),\n 'test': ('hi-ud-test.conllu',\n '004a7fdde368f32f9f230bc5e2cf4ce9e1d8f8d7'),\n 'train': ('hi-ud-train.conllu',\n '9be8afb2cabda361817c55b3de6ebba2c3fef7e0')},\n 'hu': {'dev': ('hu-ud-dev.conllu',\n 'ec622e6bcf2a84b0b47eba0de01cf5768157a50e'),\n 'test': ('hu-ud-test.conllu',\n 'fd717d25add38c2fb2dc8e82e2f9e5b0b9f3c5b8'),\n 'train': ('hu-ud-train.conllu',\n 'e5486523a8bebe40d633ad8b4050be8a3d11c78a')},\n 'id': {'dev': ('id-ud-dev.conllu',\n '7b181aa954a4f4b22b80a18e4f67cbf423e9c701'),\n 'test': ('id-ud-test.conllu',\n '357ed8c216725760bf5be561ed6e918ce602b5ac'),\n 'train': ('id-ud-train.conllu',\n '328ea588b75de55ef48373c2bf9983bca277d724')},\n 'ga': {'dev': ('ga-ud-dev.conllu',\n '180a1a9dcfcec6528a559032c67e9a15693a039d'),\n 'test': ('ga-ud-test.conllu',\n 'b74a56372af3f68f089ea82ba858e5a82aae4e22'),\n 'train': ('ga-ud-train.conllu',\n '40df0b12fbadae6e56c0a01da483d6c612d9450c')},\n 'it_pud': {'test': ('it_pud-ud-test.conllu',\n 'c7121c03dbdc7d27f89c6f6dd8f046b89233438e')},\n 'it_partut': {'dev': ('it_partut-ud-dev.conllu',\n '0bb5dc0c0815212c9832eaef3b802cf885e0543b'),\n 'test': ('it_partut-ud-test.conllu',\n 'b5eccd3d9a94a2f96c8c3a6e4192a287ac563898'),\n 'train': ('it_partut-ud-train.conllu',\n '784b18bf8d3b59d967d147075a3cb5b03fb28637')},\n 'it_postwita': {'dev': ('it_postwita-ud-dev.conllu',\n '07f6f658246aa070e2166e688f7569d61aafff54'),\n 'test': ('it_postwita-ud-test.conllu',\n 'c2d58f50e51d37cb5f55bd0a3129138e95a72a8a'),\n 'train': ('it_postwita-ud-train.conllu',\n '69684c47fba99230f6ef1a204b95c37d28eaa5a6')},\n 'it': {'dev': ('it-ud-dev.conllu',\n 'ea8fd59f36280fbd77b9a807959491636048a698'),\n 'test': ('it-ud-test.conllu',\n '34839fdeeef883f8034c723a18772947106cec6b'),\n 'train': ('it-ud-train.conllu',\n 'a0cae413f46a344366f86bc7ffe4f5d7ecbf6a14')},\n 'ja_pud': {'test': ('ja_pud-ud-test.conllu',\n '4c914016a0968ca434348370d38c9579a60e8fd7')},\n 'ja': {'dev': ('ja-ud-dev.conllu',\n '21f06fef7fbeccd05a298385bf40f8b4ffe95146'),\n 'test': ('ja-ud-test.conllu',\n '240d3532698356a7c6f93c3215718ef2f66a672f'),\n 'train': ('ja-ud-train.conllu',\n '35eaf307d94c2006241fe08f745d7b1b17f049cf')},\n 'kk': {'dev': ('kk-ud-dev.conllu',\n '038033c822b407040a4ecb87c077506cd0d1a322'),\n 'test': ('kk-ud-test.conllu',\n '4124bcaa6e4fc132613d94a882abcff8ecad8ca0'),\n 'train': ('kk-ud-train.conllu',\n '48d664d273ad6731cb65228ce9b57ad3cf50f7f5')},\n 'ko': {'dev': ('ko-ud-dev.conllu',\n '60e7da7cca44c923873a062e80262726659f5528'),\n 'test': ('ko-ud-test.conllu',\n 'bc9a0fc4ddfed14b70bb58048bf8b8d50062cffd'),\n 'train': ('ko-ud-train.conllu',\n 'ee21328f9ea39668e802f0cb6a794358f5c256bf')},\n 'kmr': {'sample': ('kmr-ud-sample.conllu',\n 'd76d631400d17b63b9592ce3c0f4ecada012d6d0'),\n 'test': ('kmr-ud-test.conllu',\n '606a338db2d6adde6b4d7d8c9ee2bdf1f988d729')},\n 'la_ittb': {'dev': ('la_ittb-ud-dev.conllu',\n 'd9f17992bd0258a734aea9b6c53759039717c86a'),\n 'test': ('la_ittb-ud-test.conllu',\n 'f4d097d076083240c48594d4cb058840ff16be8e'),\n 'train': ('la_ittb-ud-train.conllu',\n '627d5b30b20655efab194c75fc9219b0aa2cf4b6')},\n 'la_proiel': {'dev': 
('la_proiel-ud-dev.conllu',\n '9a510ff1f29b507ce46d32c04eb8f02ec8bdb4fb'),\n 'test': ('la_proiel-ud-test.conllu',\n '697dbeae38507856a4fafa8506dfc8db5e8e4054'),\n 'train': ('la_proiel-ud-train.conllu',\n '5e57e0a83ed8dcdfcc892c2558249cb6bc02b37a')},\n 'la': {'dev': ('la-ud-dev.conllu',\n '2748bb0479cb599e1a007d1d1634d5870b45549b'),\n 'test': ('la-ud-test.conllu',\n '19c62c64ce41a650e9b55a345c61e7c0d994816e'),\n 'train': ('la-ud-train.conllu',\n '183ce6f58b0305e5926161e29b9a6aacc424662c')},\n 'lv': {'dev': ('lv-ud-dev.conllu',\n '6bf3843d92aeb5b4a5e3b457708ad0aca176fbd2'),\n 'test': ('lv-ud-test.conllu',\n '9f7806a24656db0e859efe041a88926b220b8e28'),\n 'train': ('lv-ud-train.conllu',\n 'f1eeff608e8f27d92b683ae041591355198841eb')},\n 'lt': {'dev': ('lt-ud-dev.conllu',\n '0b8dc19005571fa7b66d8302b797d51a241f128b'),\n 'test': ('lt-ud-test.conllu',\n 'def54d6caf97610eb4ca8c0179d661c8eab98951'),\n 'train': ('lt-ud-train.conllu',\n '13fe42a3d21f17a5cad5aaf38692619c7713e177')},\n 'mr': {'dev': ('mr-ud-dev.conllu',\n 'abf7ac90a3696bb979e6ddc17cbc0fc761040b1b'),\n 'test': ('mr-ud-test.conllu',\n 'b70e2a135e69dc17474951bfd9c7cf3f203d4798'),\n 'train': ('mr-ud-train.conllu',\n '24a1370184054a7f5af647997dca783d6c571242')},\n 'sme': {'sample': ('sme-ud-sample.conllu',\n '8c456f06b363c4d273fc454a49505f783f00fe43'),\n 'test': ('sme-ud-test.conllu',\n '6c2084f60d7f2d1468a0cb4f4a4b9669274b122e'),\n 'train': ('sme-ud-train.conllu',\n '203eab4183fd585efe3fea7e6df493a6746b0a9f')},\n 'no_bokmaal': {'dev': ('no_bokmaal-ud-dev.conllu',\n '3a1aa6646ee62c605a6e5a7b535434ce93d0581f'),\n 'test': ('no_bokmaal-ud-test.conllu',\n '18336ef0e4877ae28eb7d6019afe05b5a53245d5'),\n 'train': ('no_bokmaal-ud-train.conllu',\n 'c6a1d75956dfb9376e568bf241b3ee5ebf3be3a5')},\n 'no_nynorsk': {'dev': ('no_nynorsk-ud-dev.conllu',\n '5b95a070d11a61a23fc340ecbbbbb70f86884498'),\n 'test': ('no_nynorsk-ud-test.conllu',\n '3eaab8e4af82de2333521e9be0954ffaf6b1440b'),\n 'train': ('no_nynorsk-ud-train.conllu',\n '79319993097c30ddf28d4c1137b8662f4f35d17e')},\n 'no_nynorsklia': {'dev': ('no_nynorsklia-ud-dev.conllu',\n 'f3e3cc9b156784c12e7540b6e09a19963df8d7d9'),\n 'test': ('no_nynorsklia-ud-test.conllu',\n 'c43abf4ad0d9c1d844edb9ff0fdf8b00949c4a0b')},\n 'cu': {'dev': ('cu-ud-dev.conllu',\n '0b67035ed5ca52aeefae443611232ed202fb990a'),\n 'test': ('cu-ud-test.conllu',\n '0fed872a5a2480b601c67ebbecf8dcd680b6863b'),\n 'train': ('cu-ud-train.conllu',\n '1c58f7322b96aa65e2b6bbeb5cb5226b46dc3ef0')},\n 'fa': {'dev': ('fa-ud-dev.conllu',\n '098f97ff4c0a6a9dcaafe2c83908b1ff044b4446'),\n 'test': ('fa-ud-test.conllu',\n '0024aa6bad5eceed2e36f77d88578304a5886a80'),\n 'train': ('fa-ud-train.conllu',\n '1692f90f58fb1ed2faaa4e8c5d2d47a37c47082b')},\n 'pl': {'dev': ('pl-ud-dev.conllu',\n 'b7af7bee091feb0788eb9793a7102972006421dc'),\n 'test': ('pl-ud-test.conllu',\n 'e141e793ba35f8a08510ec1ce494099b5c800ca8'),\n 'train': ('pl-ud-train.conllu',\n 'f2227ba184a5030fc47b1aff732e04ae11b9ab94')},\n 'pt_br': {'dev': ('pt_br-ud-dev.conllu',\n '8eedc77096a87fe8ab251100d460780e161e5397'),\n 'test': ('pt_br-ud-test.conllu',\n '37a64e3acef107b62ab62ce478fc36ed112fb58f'),\n 'train': ('pt_br-ud-train.conllu',\n '023cafcb6959d52298ad619f7838f26db9798aa9')},\n 'pt_pud': {'test': ('pt_pud-ud-test.conllu',\n '4f7a98b59255ff58a1a423dda6f2cb7261dcea7d')},\n 'pt': {'dev': ('pt-ud-dev.conllu',\n '2171b4ac2b0726c9dfae6adf394b76be927accab'),\n 'test': ('pt-ud-test.conllu',\n '9e819a4592db42905806141d6fca3b7b20396ce3'),\n 'train': ('pt-ud-train.conllu',\n 
'b5fbb6598d5cc53a0f7e699adeb4a61948a49b5c')},\n 'ro_nonstandard': {'test': ('ro_nonstandard-ud-test.conllu',\n '300d53091412dc5700dc5cad0fd3e136f7c8cb11'),\n 'train': ('ro_nonstandard-ud-train.conllu',\n 'ed97f51129b63857627f838f68f41c9ef8541686')},\n 'ro': {'dev': ('ro-ud-dev.conllu',\n 'a320e29582e837fa48bbe0aab8e205cadfcb4a02'),\n 'test': ('ro-ud-test.conllu',\n '0cfe4806a28ebdc02dc7ea58635d8b550c3a9d7b'),\n 'train': ('ro-ud-train.conllu',\n '74beb2aa92d2fca50dbb1a4f716b936afb436ab9')},\n 'ru_pud': {'test': ('ru_pud-ud-test.conllu',\n 'bca81ce7aaf3cb8add98b19faecc1d8303901631')},\n 'ru_syntagrus': {'dev': ('ru_syntagrus-ud-dev.conllu',\n '304c6ec7fb5060583af5f890384e3a480f8c3ad5'),\n 'test': ('ru_syntagrus-ud-test.conllu',\n 'c138e39b48dc1c66d106e68ee75c6fce28ef780c'),\n 'train': ('ru_syntagrus-ud-train.conllu',\n '8fa56fa80845e4ad946189d1e7af228b5595e312')},\n 'ru': {'dev': ('ru-ud-dev.conllu',\n 'd3b11c0fd8a87bfb7ce9666a1888126ae5ddca90'),\n 'test': ('ru-ud-test.conllu',\n 'ae13bbf49e0d2fddae8ba2eeacd15a9a77c7bfff'),\n 'train': ('ru-ud-train.conllu',\n 'fd43e7323ad2e62a6924fc5b5d48e85c6ab5a430')},\n 'sa': {'test': ('sa-ud-test.conllu',\n 'fad3a03a6834884a092b1d326625c6f663e36636')},\n 'sr': {'dev': ('sr-ud-dev.conllu',\n 'dcb9a242986285e83512ddaa4b3ada07c4cea17a'),\n 'test': ('sr-ud-test.conllu',\n '0f0c9e394c440bb2dd514bdd6873d3ffef13821b'),\n 'train': ('sr-ud-train.conllu',\n '97ea9bfe4ac97011598fbb5ca20b5cbaf5093334')},\n 'sk': {'dev': ('sk-ud-dev.conllu',\n 'c84563c08922d60b0c765e9f9c22d9f6f2765ff9'),\n 'test': ('sk-ud-test.conllu',\n '89af4581c5f9058809f48788eb635a92cda0603c'),\n 'train': ('sk-ud-train.conllu',\n '89e108093bbf5619578955fdadfe200cefd8cf01')},\n 'sl_sst': {'dev': ('sl_sst-ud-dev.conllu',\n 'c65ae82123af95ec11f47262546b5ab2fc5735e5'),\n 'test': ('sl_sst-ud-test.conllu',\n '144a0124c1181b49d0c542a4a6d4465e45545f3b'),\n 'train': ('sl_sst-ud-train.conllu',\n '4cbb97d5c19cfb1d85cdd54a13e24de2343a4ac5')},\n 'sl': {'dev': ('sl-ud-dev.conllu',\n '0078572c19574d32defeae9924176da2dd701ede'),\n 'test': ('sl-ud-test.conllu',\n '616ace00e25df99be8dd49b7bf7c48f1093df96a'),\n 'train': ('sl-ud-train.conllu',\n '1462ac69163b30cf1399527e95f686ebf91be2d3')},\n 'es_ancora': {'dev': ('es_ancora-ud-dev.conllu',\n '94b00cc6449a1793b5ba1d9d5c1e4b34ad1cc7d5'),\n 'test': ('es_ancora-ud-test.conllu',\n '8d7dc8d8441e1ca4b54708a5382ed61b48bf7920'),\n 'train': ('es_ancora-ud-train.conllu',\n '95d5bf7ad33304f3440ffb014ac094c4967c303f')},\n 'es_pud': {'test': ('es_pud-ud-test.conllu',\n 'c2b17fce1da3bdd2a50d9dd7eca101db1d2907e0')},\n 'es': {'dev': ('es-ud-dev.conllu',\n '4cdb828c492c6b7707af0ab6c7fbf734f770630a'),\n 'test': ('es-ud-test.conllu',\n 'afd1ae1b7eb73a91456c30acf388eef4faf4785a'),\n 'train': ('es-ud-train.conllu',\n '5ce48b44ba1b3e748a40cb5bf893d3096518ecbc')},\n 'sv_lines': {'dev': ('sv_lines-ud-dev.conllu',\n '15f1a04d960518fe7bfee23ce227fc7b78d4b755'),\n 'test': ('sv_lines-ud-test.conllu',\n '843df4ea3ab4f551b1eaa661652a8d6489a81d41'),\n 'train': ('sv_lines-ud-train.conllu',\n '16e3533bf174b36d728847a36a3600f16c63baa6')},\n 'sv_pud': {'test': ('sv_pud-ud-test.conllu',\n '18dadac0c15468256b340835ebc0529facbe9b73')},\n 'sv': {'dev': ('sv-ud-dev.conllu',\n '6d14e1aae5c9ae37c35481c44c04bf74a4233455'),\n 'test': ('sv-ud-test.conllu',\n '7ead0f7b49508db0022c042195ac5925b611c5b7'),\n 'train': ('sv-ud-train.conllu',\n '68affb85efde6ed017eab1e998e9666108559e04')},\n 'swl': {'dev': ('swl-ud-dev.conllu',\n '828e0a08f12cabfa75f9dd2b53dba58606522a7c'),\n 'test': ('swl-ud-test.conllu',\n 
'674f76631cf16172d67b795ff92dfbb297eb4930'),\n 'train': ('swl-ud-train.conllu',\n '46b721f9cae2d5ba43f818dd487600b0ce76362a')},\n 'ta': {'dev': ('ta-ud-dev.conllu',\n '4d01f555012ddc1976933d4d928e26470f71bfa1'),\n 'test': ('ta-ud-test.conllu',\n 'e8db8816a98d8b7e81188786db7c405979a7e3c3'),\n 'train': ('ta-ud-train.conllu',\n '6753d8c7b1b016de39c087aab45056de6021c3ae')},\n 'te': {'dev': ('te-ud-dev.conllu',\n '29f46355d767e54e8565f76a063c43e95ead0fca'),\n 'test': ('te-ud-test.conllu',\n '50abe345d4ab5bae021cacd096266c57b00572b8'),\n 'train': ('te-ud-train.conllu',\n '1794469abe09e7364cda0d9764cf515dcb4a61b6')},\n 'tr_pud': {'test': ('tr_pud-ud-test.conllu',\n 'aae839e2476a2f149c98e0274d245d07a50dafaa')},\n 'tr': {'dev': ('tr-ud-dev.conllu',\n '421de4d8d0fbdda46750523bde72880414c134a3'),\n 'test': ('tr-ud-test.conllu',\n 'b175f136f6f0271c494a58a1846971c4a07cda27'),\n 'train': ('tr-ud-train.conllu',\n '5aeaf25fc9e00c75e377983a0d0a642e4df6ae7d')},\n 'uk': {'dev': ('uk-ud-dev.conllu',\n '0d3e3507edcd46a3eaa8c4702d0f5d84661a6d9d'),\n 'test': ('uk-ud-test.conllu',\n '46c88fd623894fabdafb01a826016c215e4f65cc'),\n 'train': ('uk-ud-train.conllu',\n 'd06e0e2fa67c35a20517738bd728ac3b26d8eafe')},\n 'hsb': {'sample': ('hsb-ud-sample.conllu',\n '148eddbb19b06115ea54e17a3fca58e99a85cbd9'),\n 'test': ('hsb-ud-test.conllu',\n '3d319288b4c06395b2627980737131995949f770')},\n 'ur': {'dev': ('ur-ud-dev.conllu',\n 'dc41e72b5adeb92f308cdc8dfcbf71f84b4a5cf9'),\n 'test': ('ur-ud-test.conllu',\n 'af5da25be4c4ec1f2a222bc462b39ca4bbcc0eb0'),\n 'train': ('ur-ud-train.conllu',\n '488d65b394d0de264be1221614c09e541f92f9de')},\n 'ug': {'dev': ('ug-ud-dev.conllu',\n 'a2e6cd7ef51ffd7c83de7c62fbad998f1020f857'),\n 'test': ('ug-ud-test.conllu',\n '4877323d8dbfaa8ab862f0aa8e5484fdadb9ef43')},\n 'vi': {'dev': ('vi-ud-dev.conllu',\n '1c733d3ea3e4cce00cb0aa4d599bcb3b0a6096a8'),\n 'test': ('vi-ud-test.conllu',\n '1bb822e58f21aa5ccac15fe6c6742a42e8389d41'),\n 'train': ('vi-ud-train.conllu',\n 'ac86132afc061625740abd524c5cdf3d35ebbbc4')}}\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import random
# Imports MongoClient for base level access to the local MongoDB
from pymongo import MongoClient
# Imports datetime class to create timestamp for weather data storage
from datetime import datetime
# Imports the DailyReportModel class, whose method inserts data into the daily_report_model collection
from model import DailyReportModel
# Database host ip and port information
HOST = '127.0.0.1'
PORT = '27017'
RELATIVE_CONFIG_PATH = '../config/'
DB_NAME = 'weather_db'
USER_COLLECTION = 'users'
DEVICE_COLLECTION = 'devices'
WEATHER_DATA_COLLECTION = 'weather_data'
DAILY_REPORT_MODEL = 'daily_report_model'
# This initiates the connection to MongoDB
db_handle = MongoClient(f'mongodb://{HOST}:{PORT}')
# We drop the existing database including all the collections and data
db_handle.drop_database(DB_NAME)
# We recreate the database with the same name
weather_dbh = db_handle[DB_NAME]
# user data import
# User document contains username (String), email (String), and role (String) fields
# Reads users.csv one line at a time, splits them into the data fields and inserts
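# Illustrative example (hypothetical CSV row "jdoe,jdoe@example.com,default"):
# the resulting document would be
# {'username': 'jdoe', 'email': 'jdoe@example.com', 'role': 'default'}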
with open(RELATIVE_CONFIG_PATH+USER_COLLECTION+'.csv', 'r') as user_fh:
for user_row in user_fh:
user_row = user_row.rstrip()
if user_row:
(username, email, role) = user_row.split(',')
user_data = {'username': username, 'email': email, 'role': role}
            # This creates and returns a pointer to the users collection
user_collection = weather_dbh[USER_COLLECTION]
# This inserts the data item as a document in the user collection
user_collection.insert_one(user_data)
# device data import
# Device document contains device_id (String), desc (String), type (String - temperature/humidity) and manufacturer (String) fields
# Reads devices.csv one line at a time, splits them into the data fields and inserts
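# Illustrative example (hypothetical CSV row "DT001,Temperature Sensor,Temperature,Acme"):
# {'device_id': 'DT001', 'desc': 'Temperature Sensor', 'type': 'Temperature', 'manufacturer': 'Acme'}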
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
(device_id, desc, type, manufacturer) = device_row.split(',')
device_data = {'device_id': device_id, 'desc': desc, 'type': type, 'manufacturer': manufacturer}
            # This creates and returns a pointer to the devices collection
device_collection = weather_dbh[DEVICE_COLLECTION]
# This inserts the data item as a document in the devices collection
device_collection.insert_one(device_data)
# weather data generation
# Weather data document contains device_id (String), value (Integer), and timestamp (Date) fields
# Reads devices.csv one line at a time to get device id and type. It then loops over five days (2020-12-01 to 2020-12-05).
# For each device and day, it creates random values for each hour (at the 30 minute mark) and stores the data
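# Illustrative example of one generated document (values are hypothetical):
# {'device_id': 'DT001', 'value': 23, 'timestamp': datetime(2020, 12, 1, 0, 30, 0)}
# Each device therefore contributes 5 days * 24 hours = 120 documents.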
# Create a list of (device_id, day) pairs used to populate the daily report collection below
devdaylist = []
with open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:
for device_row in device_fh:
device_row = device_row.rstrip()
if device_row:
# _ can be used to ignore values that are not needed
(device_id, _, type, _) = device_row.split(',')
for day in range(1,6):
                # Build the day timestamp and record the (device_id, day) pair
day1 = datetime(2020, 12, day)
devdaylist.append((device_id, day1))
for hour in range(0,24):
timestamp = datetime(2020, 12, day, hour, 30, 0)
                    # Generates a random data value in an appropriate range for the sensor type (normal bell-curve distribution)
if (type.lower() == 'temperature'):
value = int(random.normalvariate(24,2.2))
elif (type.lower() == 'humidity'):
value = int(random.normalvariate(45,3))
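                    # Note: a sensor type other than temperature/humidity would leave
                    # `value` unset here; the devices CSV is assumed to contain only
                    # these two types.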
weather_data = {'device_id': device_id, 'value': value, 'timestamp': timestamp}
weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]
# This inserts the data item as a document in the weather_data collection
weather_data_collection.insert_one(weather_data)
# Populate the daily_report_model collection as part of setup
drm = DailyReportModel()
for ddy in devdaylist:
drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')
|
normal
|
{
"blob_id": "a8b1b218e6649545000803c91c803580cfdbd4f1",
"index": 459,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb_handle.drop_database(DB_NAME)\n<mask token>\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\n<mask token>\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\n<mask token>\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-3": "<mask token>\nHOST = '127.0.0.1'\nPORT = '27017'\nRELATIVE_CONFIG_PATH = '../config/'\nDB_NAME = 'weather_db'\nUSER_COLLECTION = 'users'\nDEVICE_COLLECTION = 'devices'\nWEATHER_DATA_COLLECTION = 'weather_data'\nDAILY_REPORT_MODEL = 'daily_report_model'\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\ndb_handle.drop_database(DB_NAME)\nweather_dbh = db_handle[DB_NAME]\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\ndevdaylist = []\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\ndrm = DailyReportModel()\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-4": "import random\nfrom pymongo import MongoClient\nfrom datetime import datetime\nfrom model import DailyReportModel\nHOST = '127.0.0.1'\nPORT = '27017'\nRELATIVE_CONFIG_PATH = '../config/'\nDB_NAME = 'weather_db'\nUSER_COLLECTION = 'users'\nDEVICE_COLLECTION = 'devices'\nWEATHER_DATA_COLLECTION = 'weather_data'\nDAILY_REPORT_MODEL = 'daily_report_model'\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\ndb_handle.drop_database(DB_NAME)\nweather_dbh = db_handle[DB_NAME]\nwith open(RELATIVE_CONFIG_PATH + USER_COLLECTION + '.csv', 'r') as user_fh:\n for user_row in user_fh:\n user_row = user_row.rstrip()\n if user_row:\n username, email, role = user_row.split(',')\n user_data = {'username': username, 'email': email, 'role': role}\n user_collection = weather_dbh[USER_COLLECTION]\n user_collection.insert_one(user_data)\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, desc, type, manufacturer = device_row.split(',')\n device_data = {'device_id': device_id, 'desc': desc, 'type': type,\n 'manufacturer': manufacturer}\n device_collection = weather_dbh[DEVICE_COLLECTION]\n device_collection.insert_one(device_data)\ndevdaylist = []\nwith open(RELATIVE_CONFIG_PATH + DEVICE_COLLECTION + '.csv', 'r') as device_fh:\n for device_row in device_fh:\n device_row = device_row.rstrip()\n if device_row:\n device_id, _, type, _ = device_row.split(',')\n for day in range(1, 6):\n day1 = datetime(2020, 12, day)\n devdaylist.append((device_id, day1))\n for hour in range(0, 24):\n timestamp = datetime(2020, 12, day, hour, 30, 0)\n if type.lower() == 'temperature':\n value = int(random.normalvariate(24, 2.2))\n elif type.lower() == 'humidity':\n value = int(random.normalvariate(45, 3))\n weather_data = {'device_id': device_id, 'value': value,\n 'timestamp': timestamp}\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\n weather_data_collection.insert_one(weather_data)\ndrm = DailyReportModel()\nfor ddy in devdaylist:\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\n",
"step-5": "import random\r\n# Imports MongoClient for base level access to the local MongoDB\r\nfrom pymongo import MongoClient\r\n# Imports datetime class to create timestamp for weather data storage\r\nfrom datetime import datetime\r\n# Importing DailyReportModel class to use the implemented method to insert data into daily_report_model collection\r\nfrom model import DailyReportModel\r\n\r\n\r\n# Database host ip and port information\r\nHOST = '127.0.0.1'\r\nPORT = '27017'\r\n\r\nRELATIVE_CONFIG_PATH = '../config/'\r\n\r\nDB_NAME = 'weather_db'\r\nUSER_COLLECTION = 'users'\r\nDEVICE_COLLECTION = 'devices'\r\nWEATHER_DATA_COLLECTION = 'weather_data'\r\nDAILY_REPORT_MODEL = 'daily_report_model'\r\n\r\n# This will initiate connection to the mongodb\r\ndb_handle = MongoClient(f'mongodb://{HOST}:{PORT}')\r\n\r\n# We drop the existing database including all the collections and data\r\ndb_handle.drop_database(DB_NAME)\r\n\r\n# We recreate the database with the same name\r\nweather_dbh = db_handle[DB_NAME]\r\n\r\n\r\n# user data import\r\n# User document contains username (String), email (String), and role (String) fields\r\n# Reads users.csv one line at a time, splits them into the data fields and inserts\r\nwith open(RELATIVE_CONFIG_PATH+USER_COLLECTION+'.csv', 'r') as user_fh:\r\n for user_row in user_fh:\r\n user_row = user_row.rstrip()\r\n if user_row:\r\n (username, email, role) = user_row.split(',')\r\n user_data = {'username': username, 'email': email, 'role': role}\r\n \r\n # This creates and return a pointer to the users collection\r\n user_collection = weather_dbh[USER_COLLECTION]\r\n \r\n # This inserts the data item as a document in the user collection\r\n user_collection.insert_one(user_data)\r\n\r\n\r\n# device data import\r\n# Device document contains device_id (String), desc (String), type (String - temperature/humidity) and manufacturer (String) fields\r\n# Reads devices.csv one line at a time, splits them into the data fields and inserts\r\nwith open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:\r\n for device_row in device_fh:\r\n device_row = device_row.rstrip()\r\n if device_row:\r\n (device_id, desc, type, manufacturer) = device_row.split(',')\r\n device_data = {'device_id': device_id, 'desc': desc, 'type': type, 'manufacturer': manufacturer}\r\n \r\n # This creates and return a pointer to the devices collection\r\n device_collection = weather_dbh[DEVICE_COLLECTION]\r\n \r\n # This inserts the data item as a document in the devices collection\r\n device_collection.insert_one(device_data)\r\n\r\n\r\n# weather data generation\r\n# Weather data document contains device_id (String), value (Integer), and timestamp (Date) fields\r\n# Reads devices.csv one line at a time to get device id and type. 
It then loops for five days (2020-12-01 to 2020-12-05\r\n# For each device and day, it creates random values for each hour (at the 30 minute mark) and stores the data\r\n\r\n#Created a list to populate it with device id and timestamp\r\ndevdaylist = []\r\nwith open(RELATIVE_CONFIG_PATH+DEVICE_COLLECTION+'.csv', 'r') as device_fh:\r\n for device_row in device_fh:\r\n device_row = device_row.rstrip()\r\n if device_row:\r\n # _ can be used to ignore values that are not needed\r\n (device_id, _, type, _) = device_row.split(',')\r\n for day in range(1,6):\r\n #creating and appending data to the list\r\n day1 = datetime(2020, 12, day)\r\n devdaylist.append((device_id, day1))\r\n for hour in range(0,24):\r\n timestamp = datetime(2020, 12, day, hour, 30, 0)\r\n # Generates random data value in appropriate range as per the type of sensor (normal bell-curve distribution)\r\n if (type.lower() == 'temperature'):\r\n value = int(random.normalvariate(24,2.2))\r\n elif (type.lower() == 'humidity'):\r\n value = int(random.normalvariate(45,3))\r\n weather_data = {'device_id': device_id, 'value': value, 'timestamp': timestamp}\r\n weather_data_collection = weather_dbh[WEATHER_DATA_COLLECTION]\r\n \r\n # This inserts the data item as a document in the weather_data collection\r\n weather_data_collection.insert_one(weather_data)\r\n \r\n\r\n\r\n#Populating the data to daily_report_model collection on setup\r\ndrm = DailyReportModel()\r\nfor ddy in devdaylist:\r\n drm.insert_daily_report_to_daily_report_model(ddy[0], ddy[1], 'admin')\r\n \r\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(123)
<|reserved_special_token_0|>
tf.enable_eager_execution()
tf.set_random_seed(123)
<|reserved_special_token_0|>
gen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.
activations.elu))
gen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
gen.add(tf.keras.layers.Dense(Q))
<|reserved_special_token_0|>
disc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.
activations.elu))
disc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
disc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))
gen.summary()
disc.summary()
disc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
<|reserved_special_token_0|>
both_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
for epoch in tqdm(range(epochs)):
some_noise = np.random.normal(size=[N, R])
gen_dat = gen.predict(np.hstack([x, some_noise]))
disc.trainable = True
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf
.float32))
preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))
dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.
ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))
dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.
zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))
dl = 0.5 * tf.add(dl_real, dl_fake)
grads = t.gradient(dl, disc.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, disc.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.
trainable_variables[i]) for i in range(len(grads))]
disc.optimizer.apply_gradients(grads_n_vars)
disc.trainable = False
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,
tf.float32)])
bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)
.reshape([N, 1]), tf.cast(preds, tf.float64)))
grads = t.gradient(bl, both_mod.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, both_mod.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],
both_mod.trainable_variables[i]) for i in range(len(grads))]
both_mod.optimizer.apply_gradients(grads_n_vars)
<|reserved_special_token_0|>
plt.scatter(x, y)
<|reserved_special_token_0|>
plt.scatter(x, preds)
plt.savefig('temp.pdf')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
np.random.seed(123)
<|reserved_special_token_0|>
tf.enable_eager_execution()
tf.set_random_seed(123)
P = 1
R = 1
Q = 1
H = 20
epochs = 1000
doubleback_const = 1
mcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header=1)
N = mcycle.shape[0]
x = mcycle[:, 0].reshape([N, P])
y = mcycle[:, 1].reshape([N, Q])
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
gen = tf.keras.Sequential()
gen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.
activations.elu))
gen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
gen.add(tf.keras.layers.Dense(Q))
disc = tf.keras.Sequential()
disc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.
activations.elu))
disc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
disc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))
gen.summary()
disc.summary()
disc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
noise = tf.keras.layers.Input(shape=(R,))
xdat = tf.keras.layers.Input(shape=(P,))
genin = tf.keras.layers.concatenate([xdat, noise])
genout = gen(genin)
discin = tf.keras.layers.concatenate([xdat, genout])
validity = disc(discin)
both_mod = tf.keras.models.Model([xdat, noise], validity)
both_mod.layers[5].trainable = False
both_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
for epoch in tqdm(range(epochs)):
some_noise = np.random.normal(size=[N, R])
gen_dat = gen.predict(np.hstack([x, some_noise]))
disc.trainable = True
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf
.float32))
preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))
dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.
ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))
dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.
zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))
dl = 0.5 * tf.add(dl_real, dl_fake)
grads = t.gradient(dl, disc.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, disc.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.
trainable_variables[i]) for i in range(len(grads))]
disc.optimizer.apply_gradients(grads_n_vars)
disc.trainable = False
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,
tf.float32)])
bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)
.reshape([N, 1]), tf.cast(preds, tf.float64)))
grads = t.gradient(bl, both_mod.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, both_mod.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],
both_mod.trainable_variables[i]) for i in range(len(grads))]
both_mod.optimizer.apply_gradients(grads_n_vars)
fig = plt.figure()
plt.scatter(x, y)
some_noise = np.random.normal(size=[N, R])
preds = gen.predict(np.hstack([x, some_noise]))
plt.scatter(x, preds)
plt.savefig('temp.pdf')
<|reserved_special_token_1|>
import keras
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
np.random.seed(123)
import tensorflow as tf
from scipy.optimize import line_search
tf.enable_eager_execution()
tf.set_random_seed(123)
P = 1
R = 1
Q = 1
H = 20
epochs = 1000
doubleback_const = 1
mcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header=1)
N = mcycle.shape[0]
x = mcycle[:, 0].reshape([N, P])
y = mcycle[:, 1].reshape([N, Q])
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
gen = tf.keras.Sequential()
gen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.
activations.elu))
gen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
gen.add(tf.keras.layers.Dense(Q))
disc = tf.keras.Sequential()
disc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.
activations.elu))
disc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))
disc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))
gen.summary()
disc.summary()
disc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
noise = tf.keras.layers.Input(shape=(R,))
xdat = tf.keras.layers.Input(shape=(P,))
genin = tf.keras.layers.concatenate([xdat, noise])
genout = gen(genin)
discin = tf.keras.layers.concatenate([xdat, genout])
validity = disc(discin)
both_mod = tf.keras.models.Model([xdat, noise], validity)
both_mod.layers[5].trainable = False
both_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),
'binary_crossentropy')
for epoch in tqdm(range(epochs)):
some_noise = np.random.normal(size=[N, R])
gen_dat = gen.predict(np.hstack([x, some_noise]))
disc.trainable = True
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf
.float32))
preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))
dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.
ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))
dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.
zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))
dl = 0.5 * tf.add(dl_real, dl_fake)
grads = t.gradient(dl, disc.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, disc.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.
trainable_variables[i]) for i in range(len(grads))]
disc.optimizer.apply_gradients(grads_n_vars)
disc.trainable = False
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,
tf.float32)])
bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)
.reshape([N, 1]), tf.cast(preds, tf.float64)))
grads = t.gradient(bl, both_mod.trainable_variables)
grads_norm = 0
for i in range(len(grads)):
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, both_mod.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],
both_mod.trainable_variables[i]) for i in range(len(grads))]
both_mod.optimizer.apply_gradients(grads_n_vars)
fig = plt.figure()
plt.scatter(x, y)
some_noise = np.random.normal(size=[N, R])
preds = gen.predict(np.hstack([x, some_noise]))
plt.scatter(x, preds)
plt.savefig('temp.pdf')
<|reserved_special_token_1|>
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python/motorcycle.py Author "Nathan Wycoff <[email protected]>" Date 06.23.2019
# Run a CGAN on the motorcycle data.
import keras
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
np.random.seed(123)
import tensorflow as tf
from scipy.optimize import line_search
tf.enable_eager_execution()
tf.set_random_seed(123)
P = 1 # Dim of X data (to be conditioned on)
R = 1 # Dim of latent error variable
Q = 1 # Dim of y data (to be generated)
H = 20 # Number of hidden units
epochs = 1000
doubleback_const = 1
# Load and pre-process data
mcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header = 1)
N = mcycle.shape[0]
x = mcycle[:,0].reshape([N,P])
y = mcycle[:,1].reshape([N,Q])
#x /= max(x)
#y = (y-min(y)) / (max(y) - min(y))
x = (x - np.mean(x)) / np.std(x)
y = (y - np.mean(y)) / np.std(y)
# Build the generator, accepts X and Z as inputs
gen = tf.keras.Sequential()
gen.add(tf.keras.layers.Dense(H, input_dim = P + R, activation = tf.keras.activations.elu))
gen.add(tf.keras.layers.Dense(H, activation = tf.keras.activations.elu))
gen.add(tf.keras.layers.Dense(Q))
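# The generator maps [x, z] of shape [batch, P+R] through two H-unit ELU
# layers to a [batch, Q] draw of y given x.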
# Build the discriminator, accepts an X and a Y as inputs.
disc = tf.keras.Sequential()
disc.add(tf.keras.layers.Dense(H, input_dim = P + Q, activation = tf.keras.activations.elu))
disc.add(tf.keras.layers.Dense(H, activation = tf.keras.activations.elu))
disc.add(tf.keras.layers.Dense(1, activation = tf.keras.activations.sigmoid))
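# The discriminator maps a [batch, P+Q] stack of (x, y) pairs to a
# probability in (0, 1) that the pair came from the real data.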
gen.summary()
disc.summary()
# NOTE: Compilation of discriminator needs to occur BEFORE we set its weights untrainable below, as these changes will not be reflected until disc is compiled again. So also be wary of compiling disc later, as its weights may not change.
#TODO: the above is a mess, find a better way.
#disc.compile(tf.keras.optimizers.Adam(), 'binary_crossentropy')
disc.compile(tf.train.GradientDescentOptimizer(learning_rate = 1.0), 'binary_crossentropy')
noise = tf.keras.layers.Input(shape = (R,))
xdat = tf.keras.layers.Input(shape = (P,))
genin = tf.keras.layers.concatenate([xdat, noise])
genout = gen(genin)
discin = tf.keras.layers.concatenate([xdat, genout])
validity = disc(discin)
# NOTE: possible issue on the next line in the ordering of inputs?
both_mod = tf.keras.models.Model([xdat, noise], validity)
both_mod.layers[5].trainable = False
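# Here layers[5] is taken to be the disc submodel (after the two Inputs, the
# two concatenates, and gen in the layer list); freezing it means both_mod
# updates only the generator weights.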
#both_mod.compile(tf.keras.optimizers.Adam(), 'binary_crossentropy')
#both_mod.compile(tf.train.AdamOptimizer(), 'binary_crossentropy')
both_mod.compile(tf.train.GradientDescentOptimizer(learning_rate = 1.0), 'binary_crossentropy')
## Custom training with double backprop
#genloss = lambda: both_mod.output
#genopt = tf.keras.optimizers.Adam(genloss, both_mod.trainable_variables)
# Do the training!
for epoch in tqdm(range(epochs)):
# Sample some noise
#TODO: Batch size
some_noise = np.random.normal(size=[N,R])
gen_dat = gen.predict(np.hstack([x, some_noise]))
# Train discriminator
    #NOTE: Minor discrepancy in losses between the manual loop below and Keras's built-in: follow up if there appear to be bugs.
#disc_rl = disc.train_on_batch(np.hstack([x, y]), np.ones(N))
#disc_fl = disc.train_on_batch(np.hstack([x, gen_dat]), np.zeros(N))
#disc_loss = 0.5 * np.add(disc_rl, disc_fl)
disc.trainable = True
with tf.GradientTape() as td:
with tf.GradientTape() as t:
#preds_real = disc(tf.cast(np.concatenate([x, y]).reshape([N,P+Q]), tf.float32))
#preds_fake = disc(tf.cast(np.concatenate([x, gen_dat]).reshape([N,P+Q]), tf.float32))
preds_real = disc(tf.cast(np.hstack([x, y.reshape([N,Q])]), tf.float32))
preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))
dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N).reshape([N,1]), tf.cast(preds_real, tf.float64)))
dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.zeros(N).reshape([N,1]), tf.cast(preds_fake, tf.float64)))
dl = 0.5*tf.add(dl_real, dl_fake)
grads = t.gradient(dl, disc.trainable_variables)
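    # Average squared gradient norm; differentiating it again below yields the double-backprop term.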
grads_norm = 0
for i in range(len(grads)):
#grads_norm += tf.reduce_sum(tf.square(grads[i]))
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, disc.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.trainable_variables[i]) for i in range(len(grads))]
disc.optimizer.apply_gradients(grads_n_vars)
disc.trainable = False
# Train generator
#both_mod.train_on_batch([x, some_noise], np.ones(N))
# Manually compute and apply gradient
with tf.GradientTape() as td:
with tf.GradientTape() as t:
preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise, tf.float32)])
bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N).reshape([N,1]), tf.cast(preds, tf.float64)))
#bl = tf.losses.sigmoid_cross_entropy(preds, np.ones(N).reshape([N,1]))
grads = t.gradient(bl, both_mod.trainable_variables)
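    # Same double-backprop penalty, this time taken through the stacked generator + discriminator.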
grads_norm = 0
for i in range(len(grads)):
#grads_norm += tf.reduce_sum(tf.square(grads[i]))
grads_norm += tf.reduce_mean(tf.square(grads[i]))
grads_norm /= float(len(grads))
double_grads = td.gradient(grads_norm, both_mod.trainable_variables)
grads_n_vars = [(grads[i] + doubleback_const*double_grads[i], both_mod.trainable_variables[i]) for i in range(len(grads))]
both_mod.optimizer.apply_gradients(grads_n_vars)
# Plot the results
fig = plt.figure()
plt.scatter(x, y)
some_noise = np.random.normal(size=[N,P])
preds = gen.predict(np.hstack([x, some_noise]))
plt.scatter(x, preds)
#plt.savefig("images/motor_scatter.pdf")
plt.savefig("temp.pdf")
|
flexible
|
{
"blob_id": "aba3e0907e59bc5125759e90d3c784ceb97fca80",
"index": 9941,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnp.random.seed(123)\n<mask token>\ntf.enable_eager_execution()\ntf.set_random_seed(123)\n<mask token>\ngen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.\n activations.elu))\ngen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ngen.add(tf.keras.layers.Dense(Q))\n<mask token>\ndisc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.\n activations.elu))\ndisc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ndisc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))\ngen.summary()\ndisc.summary()\ndisc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\n<mask token>\nboth_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\nfor epoch in tqdm(range(epochs)):\n some_noise = np.random.normal(size=[N, R])\n gen_dat = gen.predict(np.hstack([x, some_noise]))\n disc.trainable = True\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf\n .float32))\n preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))\n dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))\n dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))\n dl = 0.5 * tf.add(dl_real, dl_fake)\n grads = t.gradient(dl, disc.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, disc.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.\n trainable_variables[i]) for i in range(len(grads))]\n disc.optimizer.apply_gradients(grads_n_vars)\n disc.trainable = False\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,\n tf.float32)])\n bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)\n .reshape([N, 1]), tf.cast(preds, tf.float64)))\n grads = t.gradient(bl, both_mod.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, both_mod.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],\n both_mod.trainable_variables[i]) for i in range(len(grads))]\n both_mod.optimizer.apply_gradients(grads_n_vars)\n<mask token>\nplt.scatter(x, y)\n<mask token>\nplt.scatter(x, preds)\nplt.savefig('temp.pdf')\n",
"step-3": "<mask token>\nnp.random.seed(123)\n<mask token>\ntf.enable_eager_execution()\ntf.set_random_seed(123)\nP = 1\nR = 1\nQ = 1\nH = 20\nepochs = 1000\ndoubleback_const = 1\nmcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header=1)\nN = mcycle.shape[0]\nx = mcycle[:, 0].reshape([N, P])\ny = mcycle[:, 1].reshape([N, Q])\nx = (x - np.mean(x)) / np.std(x)\ny = (y - np.mean(y)) / np.std(y)\ngen = tf.keras.Sequential()\ngen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.\n activations.elu))\ngen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ngen.add(tf.keras.layers.Dense(Q))\ndisc = tf.keras.Sequential()\ndisc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.\n activations.elu))\ndisc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ndisc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))\ngen.summary()\ndisc.summary()\ndisc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\nnoise = tf.keras.layers.Input(shape=(R,))\nxdat = tf.keras.layers.Input(shape=(P,))\ngenin = tf.keras.layers.concatenate([xdat, noise])\ngenout = gen(genin)\ndiscin = tf.keras.layers.concatenate([xdat, genout])\nvalidity = disc(discin)\nboth_mod = tf.keras.models.Model([xdat, noise], validity)\nboth_mod.layers[5].trainable = False\nboth_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\nfor epoch in tqdm(range(epochs)):\n some_noise = np.random.normal(size=[N, R])\n gen_dat = gen.predict(np.hstack([x, some_noise]))\n disc.trainable = True\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf\n .float32))\n preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))\n dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))\n dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))\n dl = 0.5 * tf.add(dl_real, dl_fake)\n grads = t.gradient(dl, disc.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, disc.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.\n trainable_variables[i]) for i in range(len(grads))]\n disc.optimizer.apply_gradients(grads_n_vars)\n disc.trainable = False\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,\n tf.float32)])\n bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)\n .reshape([N, 1]), tf.cast(preds, tf.float64)))\n grads = t.gradient(bl, both_mod.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, both_mod.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],\n both_mod.trainable_variables[i]) for i in range(len(grads))]\n both_mod.optimizer.apply_gradients(grads_n_vars)\nfig = plt.figure()\nplt.scatter(x, y)\nsome_noise = np.random.normal(size=[N, P])\npreds = gen.predict(np.hstack([x, some_noise]))\nplt.scatter(x, preds)\nplt.savefig('temp.pdf')\n",
"step-4": "import keras\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nnp.random.seed(123)\nimport tensorflow as tf\nfrom scipy.optimize import line_search\ntf.enable_eager_execution()\ntf.set_random_seed(123)\nP = 1\nR = 1\nQ = 1\nH = 20\nepochs = 1000\ndoubleback_const = 1\nmcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header=1)\nN = mcycle.shape[0]\nx = mcycle[:, 0].reshape([N, P])\ny = mcycle[:, 1].reshape([N, Q])\nx = (x - np.mean(x)) / np.std(x)\ny = (y - np.mean(y)) / np.std(y)\ngen = tf.keras.Sequential()\ngen.add(tf.keras.layers.Dense(H, input_dim=P + R, activation=tf.keras.\n activations.elu))\ngen.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ngen.add(tf.keras.layers.Dense(Q))\ndisc = tf.keras.Sequential()\ndisc.add(tf.keras.layers.Dense(H, input_dim=P + Q, activation=tf.keras.\n activations.elu))\ndisc.add(tf.keras.layers.Dense(H, activation=tf.keras.activations.elu))\ndisc.add(tf.keras.layers.Dense(1, activation=tf.keras.activations.sigmoid))\ngen.summary()\ndisc.summary()\ndisc.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\nnoise = tf.keras.layers.Input(shape=(R,))\nxdat = tf.keras.layers.Input(shape=(P,))\ngenin = tf.keras.layers.concatenate([xdat, noise])\ngenout = gen(genin)\ndiscin = tf.keras.layers.concatenate([xdat, genout])\nvalidity = disc(discin)\nboth_mod = tf.keras.models.Model([xdat, noise], validity)\nboth_mod.layers[5].trainable = False\nboth_mod.compile(tf.train.GradientDescentOptimizer(learning_rate=1.0),\n 'binary_crossentropy')\nfor epoch in tqdm(range(epochs)):\n some_noise = np.random.normal(size=[N, R])\n gen_dat = gen.predict(np.hstack([x, some_noise]))\n disc.trainable = True\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds_real = disc(tf.cast(np.hstack([x, y.reshape([N, Q])]), tf\n .float32))\n preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))\n dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n ones(N).reshape([N, 1]), tf.cast(preds_real, tf.float64)))\n dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.\n zeros(N).reshape([N, 1]), tf.cast(preds_fake, tf.float64)))\n dl = 0.5 * tf.add(dl_real, dl_fake)\n grads = t.gradient(dl, disc.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, disc.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.\n trainable_variables[i]) for i in range(len(grads))]\n disc.optimizer.apply_gradients(grads_n_vars)\n disc.trainable = False\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise,\n tf.float32)])\n bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N)\n .reshape([N, 1]), tf.cast(preds, tf.float64)))\n grads = t.gradient(bl, both_mod.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n double_grads = td.gradient(grads_norm, both_mod.trainable_variables)\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i],\n both_mod.trainable_variables[i]) for i in range(len(grads))]\n both_mod.optimizer.apply_gradients(grads_n_vars)\nfig = plt.figure()\nplt.scatter(x, y)\nsome_noise = np.random.normal(size=[N, P])\npreds = gen.predict(np.hstack([x, some_noise]))\nplt.scatter(x, 
preds)\nplt.savefig('temp.pdf')\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# python/motorcycle.py Author \"Nathan Wycoff <[email protected]>\" Date 06.23.2019\n\n# Run a CGAN on the motorcycle data.\nimport keras\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\nnp.random.seed(123)\nimport tensorflow as tf\nfrom scipy.optimize import line_search\ntf.enable_eager_execution()\ntf.set_random_seed(123)\n\nP = 1 # Dim of X data (to be conditioned on)\nR = 1 # Dim of latent error variable\nQ = 1 # Dim of y data (to be generated)\nH = 20# Number of hidden units\nepochs = 1000\ndoubleback_const = 1\n\n# Load and pre-process data\nmcycle = np.genfromtxt('./data/mcycle.csv', delimiter=',', skip_header = 1)\nN = mcycle.shape[0]\nx = mcycle[:,0].reshape([N,P])\ny = mcycle[:,1].reshape([N,Q])\n#x /= max(x)\n#y = (y-min(y)) / (max(y) - min(y))\nx = (x - np.mean(x)) / np.std(x)\ny = (y - np.mean(y)) / np.std(y)\n\n# Build the generator, accepts X and Z as inputs\ngen = tf.keras.Sequential()\ngen.add(tf.keras.layers.Dense(H, input_dim = P + R, activation = tf.keras.activations.elu))\ngen.add(tf.keras.layers.Dense(H, activation = tf.keras.activations.elu))\ngen.add(tf.keras.layers.Dense(Q))\n\n# Build the discriminator, accepts an X and a Y as inputs.\ndisc = tf.keras.Sequential()\ndisc.add(tf.keras.layers.Dense(H, input_dim = P + Q, activation = tf.keras.activations.elu))\ndisc.add(tf.keras.layers.Dense(H, activation = tf.keras.activations.elu))\ndisc.add(tf.keras.layers.Dense(1, activation = tf.keras.activations.sigmoid))\n\ngen.summary()\ndisc.summary()\n\n# NOTE: Compilation of discriminator needs to occur BEFORE we set its weights untrainable below, as these changes will not be reflected until disc is compiled again. So also be wary of compiling disc later, as its weights may not change.\n#TODO: the above is a mess, find a better way.\n#disc.compile(tf.keras.optimizers.Adam(), 'binary_crossentropy')\ndisc.compile(tf.train.GradientDescentOptimizer(learning_rate = 1.0), 'binary_crossentropy')\n\nnoise = tf.keras.layers.Input(shape = (R,))\nxdat = tf.keras.layers.Input(shape = (P,))\n\ngenin = tf.keras.layers.concatenate([xdat, noise])\ngenout = gen(genin)\n\ndiscin = tf.keras.layers.concatenate([xdat, genout])\nvalidity = disc(discin)\n\n#NOTE: Next lin possible issue in ordering of inputs?\nboth_mod = tf.keras.models.Model([xdat, noise], validity)\nboth_mod.layers[5].trainable = False\n\n#both_mod.compile(tf.keras.optimizers.Adam(), 'binary_crossentropy')\n#both_mod.compile(tf.train.AdamOptimizer(), 'binary_crossentropy')\nboth_mod.compile(tf.train.GradientDescentOptimizer(learning_rate = 1.0), 'binary_crossentropy')\n\n## Custom training with double backprop\n#genloss = lambda: both_mod.output\n#genopt = tf.keras.optimizers.Adam(genloss, both_mod.trainable_variables)\n\n# Do the training!\nfor epoch in tqdm(range(epochs)):\n # Sample some noise\n #TODO: Batch size\n some_noise = np.random.normal(size=[N,R])\n\n gen_dat = gen.predict(np.hstack([x, some_noise]))\n\n # Train discriminator\n #NOTE: Minor discrepency in losses from the manual loop below and from keras's built in: follow up if there appears to be bugs.\n #disc_rl = disc.train_on_batch(np.hstack([x, y]), np.ones(N))\n #disc_fl = disc.train_on_batch(np.hstack([x, gen_dat]), np.zeros(N))\n #disc_loss = 0.5 * np.add(disc_rl, disc_fl)\n\n disc.trainable = True\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n #preds_real = disc(tf.cast(np.concatenate([x, y]).reshape([N,P+Q]), tf.float32))\n #preds_fake = 
disc(tf.cast(np.concatenate([x, gen_dat]).reshape([N,P+Q]), tf.float32))\n preds_real = disc(tf.cast(np.hstack([x, y.reshape([N,Q])]), tf.float32))\n preds_fake = disc(tf.cast(np.hstack([x, gen_dat]), tf.float32))\n dl_real = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N).reshape([N,1]), tf.cast(preds_real, tf.float64)))\n dl_fake = tf.reduce_mean(keras.losses.binary_crossentropy(np.zeros(N).reshape([N,1]), tf.cast(preds_fake, tf.float64)))\n dl = 0.5*tf.add(dl_real, dl_fake)\n\n grads = t.gradient(dl, disc.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n #grads_norm += tf.reduce_sum(tf.square(grads[i]))\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n\n double_grads = td.gradient(grads_norm, disc.trainable_variables)\n\n grads_n_vars = [(grads[i] + doubleback_const * double_grads[i], disc.trainable_variables[i]) for i in range(len(grads))]\n disc.optimizer.apply_gradients(grads_n_vars)\n disc.trainable = False\n\n # Train generator\n #both_mod.train_on_batch([x, some_noise], np.ones(N))\n # Manually compute and apply gradient\n with tf.GradientTape() as td:\n with tf.GradientTape() as t:\n preds = both_mod([tf.cast(x, tf.float32), tf.cast(some_noise, tf.float32)])\n bl = tf.reduce_mean(keras.losses.binary_crossentropy(np.ones(N).reshape([N,1]), tf.cast(preds, tf.float64)))\n #bl = tf.losses.sigmoid_cross_entropy(preds, np.ones(N).reshape([N,1]))\n\n grads = t.gradient(bl, both_mod.trainable_variables)\n grads_norm = 0\n for i in range(len(grads)):\n #grads_norm += tf.reduce_sum(tf.square(grads[i]))\n grads_norm += tf.reduce_mean(tf.square(grads[i]))\n grads_norm /= float(len(grads))\n\n double_grads = td.gradient(grads_norm, both_mod.trainable_variables)\n\n grads_n_vars = [(grads[i] + doubleback_const*double_grads[i], both_mod.trainable_variables[i]) for i in range(len(grads))]\n both_mod.optimizer.apply_gradients(grads_n_vars)\n\n# Plot the results\nfig = plt.figure()\nplt.scatter(x, y)\nsome_noise = np.random.normal(size=[N,P])\npreds = gen.predict(np.hstack([x, some_noise]))\nplt.scatter(x, preds)\n#plt.savefig(\"images/motor_scatter.pdf\")\nplt.savefig(\"temp.pdf\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
prop = float(sys.argv[1])
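# A number is "bouncy" when its digits are neither monotonically increasing nor monotonically decreasing.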
def kind(n):
s = str(n)
l = len(s)
i = 0
j = i + 1
    decr, incr = False, False
    while j < l:
        a = int(s[i])
        b = int(s[j])
        if a > b:
            decr = True
        elif a < b:
            incr = True
i += 1
j += 1
if decr and incr:
return True
return False
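# Smallest n (counting up from 100) at which bouncy numbers reach the target percentage (cf. Project Euler 112).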
def calc(prop):
currentProp = 0
i = 100
countBouncy = 0
while currentProp < prop:
if kind(i):
countBouncy += 1
currentProp = (countBouncy * 100) / i
if currentProp >= prop:
return i
i += 1
return "Proportion was not reached."
print(calc(prop))
|
normal
|
{
"blob_id": "0de27101675eb8328d9a2831ed468a969b03e7d3",
"index": 5741,
"step-1": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-3": "<mask token>\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-4": "import sys\nprop = float(sys.argv[1])\n\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = countBouncy * 100 / i\n if currentProp >= prop:\n return i\n i += 1\n return 'Proportion was not reached.'\n\n\ncalc(prop)\n",
"step-5": "import sys\n\nprop = float(sys.argv[1])\n\ndef kind(n):\n s = str(n)\n l = len(s)\n i = 0\n j = i + 1\n decr, bouncy, incr = False, False, False\n while j < l:\n a = int(s[i])\n b = int(s[j])\n if s[i] > s[j]:\n decr = True\n elif s[i] < s[j]:\n incr = True\n i += 1\n j += 1\n if decr and incr:\n return True\n return False\n\ndef calc(prop):\n currentProp = 0\n i = 100\n countBouncy = 0\n while currentProp < prop:\n if kind(i):\n countBouncy += 1\n currentProp = (countBouncy * 100) / i\n if currentProp >= prop:\n return i\n i += 1\n return \"Proportion was not reached.\"\n\ncalc(prop)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class UserVetSchema(ma.Schema):
class Meta:
model = Uservet
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
ma = Marshmallow()
class UserVetSchema(ma.Schema):
class Meta:
model = Uservet
user_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'
]
<|reserved_special_token_1|>
from flask_marshmallow import Marshmallow
from models import Uservet
ma = Marshmallow()
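# NOTE: plain ma.Schema ignores Meta.model; with marshmallow-sqlalchemy
# installed, ma.SQLAlchemySchema is usually what is intended here.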
class UserVetSchema(ma.Schema):
class Meta:
model = Uservet
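# Uservet field names, presumably consumed elsewhere as the schema's field list.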
user_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'
]
|
flexible
|
{
"blob_id": "677154aa99a5a4876532f3e1edfec45b1790384c",
"index": 9511,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\n<mask token>\n",
"step-3": "<mask token>\nma = Marshmallow()\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\nuser_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'\n ]\n",
"step-4": "from flask_marshmallow import Marshmallow\nfrom models import Uservet\nma = Marshmallow()\n\n\nclass UserVetSchema(ma.Schema):\n\n\n class Meta:\n model = Uservet\n\n\nuser_vet_1 = ['dni', 'email', 'nombre', 'apellidos', 'telefono', 'tipo_uservet'\n ]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
lista = fibonacci(n - 1)
suma = lista[len(lista) - 1] + lista[len(lista) - 2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
'''
fibonacci(6) => [1, 1, 2, 3, 5, 8]
fibonacci(7) => [1, 1, 2, 3, 5, 8, 13]
'''
def fibonacci(n):
if n == 0:
return []
elif n == 1:
return [1]
elif n == 2:
return [1, 1]
else:
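        # Build the series for n-1, then append the sum of its last two entries.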
lista = fibonacci(n-1)
suma = lista[len(lista)-1] + lista[len(lista)-2]
lista.append(suma)
return lista
def main():
resultado = fibonacci(6)
print(resultado)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "03062ea08bd6ad88376f7c2aa2c89d2194ed8b2e",
"index": 1074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n - 1)\n suma = lista[len(lista) - 1] + lista[len(lista) - 2]\n lista.append(suma)\n return lista\n\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "'''\nfibonacci(6) => [1, 1, 2, 3, 5, 8]\nfibonacci(7) => [1, 1, 2, 3, 5, 8, 13]\n'''\n\ndef fibonacci(n):\n if n == 0:\n return []\n elif n == 1:\n return [1]\n elif n == 2:\n return [1, 1]\n else:\n lista = fibonacci(n-1)\n suma = lista[len(lista)-1] + lista[len(lista)-2]\n lista.append(suma)\n return lista\n\ndef main():\n resultado = fibonacci(6)\n print(resultado)\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
<|reserved_special_token_0|>
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file has been saved')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file has been saved')
<|reserved_special_token_1|>
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file has been saved')
<|reserved_special_token_1|>
import pandas as pd
from pandas import DataFrame
myencoding = 'utf-8'
chikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']
# chikenList = ['pelicana']
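# Accumulator frame: each store's rows are concatenated into it below.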
newframe = DataFrame()
for onestore in chikenList:
filename = onestore + '.csv'
myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)
# print(myframe.head())
# print('-'*30)
newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)
print(newframe.info())
totalfile = 'allstore.csv'
newframe.to_csv(totalfile, encoding=myencoding)
print(totalfile + ' file has been saved')
|
flexible
|
{
"blob_id": "11a31d3276201105ca7485fa4e4eb711012accd5",
"index": 2190,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\n<mask token>\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-3": "<mask token>\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-4": "import pandas as pd\nfrom pandas import DataFrame\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\nnewframe = DataFrame()\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\nprint(newframe.info())\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')\n",
"step-5": "import pandas as pd\nfrom pandas import DataFrame\n\nmyencoding = 'utf-8'\nchikenList = ['pelicana', 'nene', 'cheogajip', 'goobne']\n# chikenList = ['pelicana']\n\nnewframe = DataFrame()\n\nfor onestore in chikenList:\n filename = onestore + '.csv'\n myframe = pd.read_csv(filename, index_col=0, encoding=myencoding)\n # print(myframe.head())\n # print('-'*30)\n newframe = pd.concat([newframe, myframe], axis=0, ignore_index=True)\n\nprint(newframe.info())\n\ntotalfile = 'allstore.csv'\nnewframe.to_csv(totalfile, encoding=myencoding)\nprint(totalfile + '파일이 저장됨')",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main():
colors = vtk.vtkNamedColors()
Data = np.load('tessaltions_compressed.npz')
indices = meta['sorted_keys']
struct_D = {}
for i, s in enumerate(set([x[0] for x in indices])):
struct_D[s] = colors_list[i]
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for index in range(len(indices)):
x = Data['points_' + str(index)]
triangles = Data['triangles_' + str(index)]
print(index, x.shape, triangles.shape, '\r', end='')
points = vtk.vtkPoints()
for i in range(0, x.shape[0]):
points.InsertPoint(i, x[i, :])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(triangles.shape[0])
for i in range(triangles.shape[0]):
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])
ugrid.SetPoints(points)
uGridNormals = vtk.vtkPolyDataNormals()
uGridNormals.SetInputData(ugrid)
uGridNormals.SetFeatureAngle(30.0)
uGridNormals.SplittingOn()
print(uGridNormals)
uGridNormals.Update()
normalsPolyData = vtk.vtkPolyData()
normalsPolyData.DeepCopy(uGridNormals.GetOutput())
ugridMapper = vtk.vtkPolyDataMapper()
ugridMapper.SetInputData(normalsPolyData)
ugridMapper.ScalarVisibilityOff()
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
color = struct_D[indices[index][0]]
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
ugridActor.GetProperty().SetDiffuse(0.7)
ugridActor.GetProperty().SetSpecularPower(20)
ugridActor.GetProperty().SetSpecular(0.5)
ugridActor.GetProperty().EdgeVisibilityOff()
ugridActor.GetProperty().SetOpacity(0.5)
ugridActor.GetProperty().SetInterpolationToGouraud()
renderer.AddActor(ugridActor)
break
renderer.SetBackground(colors.GetColor3d('Beige'))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
renWin.Render()
iren.Start()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
colors = vtk.vtkNamedColors()
Data = np.load('tessaltions_compressed.npz')
indices = meta['sorted_keys']
struct_D = {}
for i, s in enumerate(set([x[0] for x in indices])):
struct_D[s] = colors_list[i]
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for index in range(len(indices)):
x = Data['points_' + str(index)]
triangles = Data['triangles_' + str(index)]
print(index, x.shape, triangles.shape, '\r', end='')
points = vtk.vtkPoints()
for i in range(0, x.shape[0]):
points.InsertPoint(i, x[i, :])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(triangles.shape[0])
for i in range(triangles.shape[0]):
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])
ugrid.SetPoints(points)
uGridNormals = vtk.vtkPolyDataNormals()
uGridNormals.SetInputData(ugrid)
uGridNormals.SetFeatureAngle(30.0)
uGridNormals.SplittingOn()
print(uGridNormals)
uGridNormals.Update()
normalsPolyData = vtk.vtkPolyData()
normalsPolyData.DeepCopy(uGridNormals.GetOutput())
ugridMapper = vtk.vtkPolyDataMapper()
ugridMapper.SetInputData(normalsPolyData)
ugridMapper.ScalarVisibilityOff()
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
color = struct_D[indices[index][0]]
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
ugridActor.GetProperty().SetDiffuse(0.7)
ugridActor.GetProperty().SetSpecularPower(20)
ugridActor.GetProperty().SetSpecular(0.5)
ugridActor.GetProperty().EdgeVisibilityOff()
ugridActor.GetProperty().SetOpacity(0.5)
ugridActor.GetProperty().SetInterpolationToGouraud()
renderer.AddActor(ugridActor)
break
renderer.SetBackground(colors.GetColor3d('Beige'))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
colors_list = pkl.load(open('permuted_colors.pkl', 'rb'))
meta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))
def main():
colors = vtk.vtkNamedColors()
Data = np.load('tessaltions_compressed.npz')
indices = meta['sorted_keys']
struct_D = {}
for i, s in enumerate(set([x[0] for x in indices])):
struct_D[s] = colors_list[i]
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for index in range(len(indices)):
x = Data['points_' + str(index)]
triangles = Data['triangles_' + str(index)]
print(index, x.shape, triangles.shape, '\r', end='')
points = vtk.vtkPoints()
for i in range(0, x.shape[0]):
points.InsertPoint(i, x[i, :])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(triangles.shape[0])
for i in range(triangles.shape[0]):
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])
ugrid.SetPoints(points)
uGridNormals = vtk.vtkPolyDataNormals()
uGridNormals.SetInputData(ugrid)
uGridNormals.SetFeatureAngle(30.0)
uGridNormals.SplittingOn()
print(uGridNormals)
uGridNormals.Update()
normalsPolyData = vtk.vtkPolyData()
normalsPolyData.DeepCopy(uGridNormals.GetOutput())
ugridMapper = vtk.vtkPolyDataMapper()
ugridMapper.SetInputData(normalsPolyData)
ugridMapper.ScalarVisibilityOff()
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
color = struct_D[indices[index][0]]
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
ugridActor.GetProperty().SetDiffuse(0.7)
ugridActor.GetProperty().SetSpecularPower(20)
ugridActor.GetProperty().SetSpecular(0.5)
ugridActor.GetProperty().EdgeVisibilityOff()
ugridActor.GetProperty().SetOpacity(0.5)
ugridActor.GetProperty().SetInterpolationToGouraud()
renderer.AddActor(ugridActor)
break
renderer.SetBackground(colors.GetColor3d('Beige'))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import vtk
import numpy as np
import pickle as pkl
colors_list = pkl.load(open('permuted_colors.pkl', 'rb'))
meta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))
def main():
colors = vtk.vtkNamedColors()
Data = np.load('tessaltions_compressed.npz')
indices = meta['sorted_keys']
struct_D = {}
for i, s in enumerate(set([x[0] for x in indices])):
struct_D[s] = colors_list[i]
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
for index in range(len(indices)):
x = Data['points_' + str(index)]
triangles = Data['triangles_' + str(index)]
print(index, x.shape, triangles.shape, '\r', end='')
points = vtk.vtkPoints()
for i in range(0, x.shape[0]):
points.InsertPoint(i, x[i, :])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(triangles.shape[0])
for i in range(triangles.shape[0]):
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])
ugrid.SetPoints(points)
uGridNormals = vtk.vtkPolyDataNormals()
uGridNormals.SetInputData(ugrid)
uGridNormals.SetFeatureAngle(30.0)
uGridNormals.SplittingOn()
print(uGridNormals)
uGridNormals.Update()
normalsPolyData = vtk.vtkPolyData()
normalsPolyData.DeepCopy(uGridNormals.GetOutput())
ugridMapper = vtk.vtkPolyDataMapper()
ugridMapper.SetInputData(normalsPolyData)
ugridMapper.ScalarVisibilityOff()
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
color = struct_D[indices[index][0]]
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
ugridActor.GetProperty().SetDiffuse(0.7)
ugridActor.GetProperty().SetSpecularPower(20)
ugridActor.GetProperty().SetSpecular(0.5)
ugridActor.GetProperty().EdgeVisibilityOff()
ugridActor.GetProperty().SetOpacity(0.5)
ugridActor.GetProperty().SetInterpolationToGouraud()
renderer.AddActor(ugridActor)
break
renderer.SetBackground(colors.GetColor3d('Beige'))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
renWin.Render()
iren.Start()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python
"""
This example shows how to create an unstructured grid.
"""
import vtk
import numpy as np
import pickle as pkl
colors_list = pkl.load(open('permuted_colors.pkl','rb'))
meta = pkl.load(open('v_atlas/meta_information.pkl','rb'))
def main():
colors = vtk.vtkNamedColors()
    Data = np.load('tessaltions_compressed.npz')
    indices = meta['sorted_keys']
    struct_D = {}  # a mapping of structure names to colors.
    for i, s in enumerate(set([x[0] for x in indices])):
        struct_D[s] = colors_list[i]
renderer = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(renderer)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
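    # One actor per tessellated piece, coloured by its structure name via struct_D.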
for index in range(len(indices)):
        x = Data['points_' + str(index)]
        triangles = Data['triangles_' + str(index)]
        print(index, x.shape, triangles.shape, '\r', end='')
points = vtk.vtkPoints()
for i in range(0, x.shape[0]):
points.InsertPoint(i, x[i,:])
ugrid = vtk.vtkUnstructuredGrid()
ugrid.Allocate(triangles.shape[0])
for i in range(triangles.shape[0]):
ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i,:])
ugrid.SetPoints(points)
        # vtkPolyDataNormals expects vtkPolyData, not a vtkUnstructuredGrid;
        # extract the grid's outer surface first (feeding ugrid in directly
        # is what caused the error originally noted here).
        surface = vtk.vtkGeometryFilter()
        surface.SetInputData(ugrid)
        uGridNormals = vtk.vtkPolyDataNormals()
        uGridNormals.SetInputConnection(surface.GetOutputPort())
        uGridNormals.SetFeatureAngle(30.0)
        #uGridNormals.ComputePointNormalsOn()
        uGridNormals.SplittingOn()
        uGridNormals.Update()
normalsPolyData = vtk.vtkPolyData()
normalsPolyData.DeepCopy(uGridNormals.GetOutput())
ugridMapper = vtk.vtkPolyDataMapper()
ugridMapper.SetInputData(normalsPolyData)
ugridMapper.ScalarVisibilityOff()
# ugridMapper = vtk.vtkDataSetMapper()
# ugridMapper.SetInputData(ugrid)
ugridActor = vtk.vtkActor()
ugridActor.SetMapper(ugridMapper)
# print(index,indices[index],struct_D[indices[index][0]])
color = struct_D[indices[index][0]]
ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))
ugridActor.GetProperty().SetDiffuse(.7)
ugridActor.GetProperty().SetSpecularPower(20)
ugridActor.GetProperty().SetSpecular(.5)
ugridActor.GetProperty().EdgeVisibilityOff()
ugridActor.GetProperty().SetOpacity(0.5)
ugridActor.GetProperty().SetInterpolationToGouraud()
renderer.AddActor(ugridActor)
        break  # NOTE: renders only the first structure; remove to show them all
renderer.SetBackground(colors.GetColor3d('Beige'))
renderer.ResetCamera()
renderer.GetActiveCamera().Elevation(60.0)
renderer.GetActiveCamera().Azimuth(30.0)
renderer.GetActiveCamera().Dolly(1.2)
renWin.SetSize(640, 480)
# Interact with the data.
renWin.Render()
iren.Start()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "7261c5f9ac87c8337383daec312372b345ab7652",
"index": 4109,
"step-1": "<mask token>\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\ncolors_list = pkl.load(open('permuted_colors.pkl', 'rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport vtk\nimport numpy as np\nimport pickle as pkl\ncolors_list = pkl.load(open('permuted_colors.pkl', 'rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl', 'rb'))\n\n\ndef main():\n colors = vtk.vtkNamedColors()\n Data = np.load('tessaltions_compressed.npz')\n indices = meta['sorted_keys']\n struct_D = {}\n for i, s in enumerate(set([x[0] for x in indices])):\n struct_D[s] = colors_list[i]\n renderer = vtk.vtkRenderer()\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n for index in range(len(indices)):\n x = Data['points_' + str(index)]\n triangles = Data['triangles_' + str(index)]\n print(index, x.shape, triangles.shape, '\\r', end='')\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i, :])\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i, :])\n ugrid.SetPoints(points)\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n uGridNormals.SplittingOn()\n print(uGridNormals)\n uGridNormals.Update()\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(0.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(0.5)\n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n renderer.AddActor(ugridActor)\n break\n renderer.SetBackground(colors.GetColor3d('Beige'))\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n renWin.SetSize(640, 480)\n renWin.Render()\n iren.Start()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\n\"\"\"\nThis example shows how to create an unstructured grid.\n\"\"\"\n\nimport vtk\nimport numpy as np\nimport pickle as pkl\n\ncolors_list = pkl.load(open('permuted_colors.pkl','rb'))\nmeta = pkl.load(open('v_atlas/meta_information.pkl','rb'))\n\ndef main():\n colors = vtk.vtkNamedColors()\n\n Data=np.load('tessaltions_compressed.npz')\n\n indices=meta['sorted_keys']\n struct_D={} # a mapping of structure names to colors.\n for i,s in enumerate(set([x[0] for x in indices])):\n struct_D[s]=colors_list[i]\n \n renderer = vtk.vtkRenderer()\n\n renWin = vtk.vtkRenderWindow()\n renWin.AddRenderer(renderer)\n iren = vtk.vtkRenderWindowInteractor()\n iren.SetRenderWindow(renWin)\n\n for index in range(len(indices)):\n x=Data['points_'+str(index)]\n triangles = Data['triangles_'+str(index)]\n print(index,x.shape, triangles.shape,'\\r',end='')\n\n points = vtk.vtkPoints()\n for i in range(0, x.shape[0]):\n points.InsertPoint(i, x[i,:])\n\n ugrid = vtk.vtkUnstructuredGrid()\n ugrid.Allocate(triangles.shape[0])\n for i in range(triangles.shape[0]):\n ugrid.InsertNextCell(vtk.VTK_TRIANGLE, 3, triangles[i,:])\n\n ugrid.SetPoints(points)\n\n\n uGridNormals = vtk.vtkPolyDataNormals()\n uGridNormals.SetInputData(ugrid)\n uGridNormals.SetFeatureAngle(30.0)\n\n #uGridNormals.ComputePointNormalsOn()\n uGridNormals.SplittingOn()\n\n print(uGridNormals)\n uGridNormals.Update() # causes an error\n\n normalsPolyData = vtk.vtkPolyData()\n normalsPolyData.DeepCopy(uGridNormals.GetOutput())\n \n ugridMapper = vtk.vtkPolyDataMapper()\n ugridMapper.SetInputData(normalsPolyData)\n ugridMapper.ScalarVisibilityOff()\n \n # ugridMapper = vtk.vtkDataSetMapper()\n # ugridMapper.SetInputData(ugrid)\n\n ugridActor = vtk.vtkActor()\n ugridActor.SetMapper(ugridMapper)\n # print(index,indices[index],struct_D[indices[index][0]])\n color = struct_D[indices[index][0]]\n ugridActor.GetProperty().SetDiffuseColor(colors.GetColor3d(color))\n ugridActor.GetProperty().SetDiffuse(.7)\n ugridActor.GetProperty().SetSpecularPower(20)\n ugridActor.GetProperty().SetSpecular(.5)\n \n ugridActor.GetProperty().EdgeVisibilityOff()\n ugridActor.GetProperty().SetOpacity(0.5)\n ugridActor.GetProperty().SetInterpolationToGouraud()\n\n renderer.AddActor(ugridActor)\n break\n\n renderer.SetBackground(colors.GetColor3d('Beige'))\n\n renderer.ResetCamera()\n renderer.GetActiveCamera().Elevation(60.0)\n renderer.GetActiveCamera().Azimuth(30.0)\n renderer.GetActiveCamera().Dolly(1.2)\n\n renWin.SetSize(640, 480)\n\n # Interact with the data.\n renWin.Render()\n\n iren.Start()\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def bin_spatial(img, color_space='RGB', size=(32, 32)):
colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.
COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,
'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}
if color_space.upper() != 'RGB':
method = colour_dict.get(color_space, 'RGB')
img = cv2.cvtColor(img, method)
else:
img = np.copy(img)
small_img = cv2.resize(img, size)
feature_vec = small_img.ravel()
return feature_vec
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def bin_spatial(img, color_space='RGB', size=(32, 32)):
colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.
COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,
'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}
if color_space.upper() != 'RGB':
method = colour_dict.get(color_space, 'RGB')
img = cv2.cvtColor(img, method)
else:
img = np.copy(img)
small_img = cv2.resize(img, size)
feature_vec = small_img.ravel()
return feature_vec
if __name__ == '__main__':
image = mpimg.imread('cutout1.jpg')
feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
<|reserved_special_token_1|>
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
def bin_spatial(img, color_space='RGB', size=(32, 32)):
colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.
COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,
'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}
if color_space.upper() != 'RGB':
method = colour_dict.get(color_space, 'RGB')
img = cv2.cvtColor(img, method)
else:
img = np.copy(img)
small_img = cv2.resize(img, size)
feature_vec = small_img.ravel()
return feature_vec
if __name__ == '__main__':
image = mpimg.imread('cutout1.jpg')
feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))
plt.plot(feature_vec)
plt.title('Spatially Binned Features')
<|reserved_special_token_1|>
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
# Define a function to compute spatially binned color features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# KEEP IN MIND IF YOU DECIDE TO USE THIS FUNCTION LATER
# IN YOUR PROJECT THAT IF YOU READ THE IMAGE WITH
# cv2.imread() INSTEAD YOU START WITH BGR COLOR!
def bin_spatial(img, color_space='RGB', size=(32, 32)):
    # The demo below loads images with mpimg.imread(), so pixels arrive as
    # RGB; the conversion codes therefore all start from RGB. (With
    # cv2.imread() you would need the cv2.COLOR_BGR2* variants instead.)
    colour_dict = {'BGR': cv2.COLOR_BGR2RGB,
                   'HLS': cv2.COLOR_RGB2HLS,
                   'HSV': cv2.COLOR_RGB2HSV,
                   'LUV': cv2.COLOR_RGB2LUV,
                   'YUV': cv2.COLOR_RGB2YUV,
                   'YCRCB': cv2.COLOR_RGB2YCrCb}  # keys normalised to upper case
    # Convert if some other colour space was requested; unknown names fall
    # back to a plain copy instead of passing a bad code to cv2.cvtColor().
    method = colour_dict.get(color_space.upper())
    if method is not None:
        img = cv2.cvtColor(img, method)
    else:
        img = np.copy(img)
small_img = cv2.resize(img, size)
feature_vec = small_img.ravel()
# Return the feature vector
return feature_vec
if __name__ == "__main__":
# You can also read cutout2, 3, 4 etc. to see other examples
image = mpimg.imread('cutout1.jpg')
feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))
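    # 32 x 32 x 3 = 3072 features per image.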
# Plot features
plt.plot(feature_vec)
    plt.title('Spatially Binned Features')
    plt.show()
##
## Solution
##
# Define a function to compute color histogram features
# Pass the color_space flag as 3-letter all caps string
# like 'HSV' or 'LUV' etc.
# def bin_spatial(img, color_space='RGB', size=(32, 32)):
# # Convert image to new color space (if specified)
# if color_space != 'RGB':
# if color_space == 'HSV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
# elif color_space == 'LUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)
# elif color_space == 'HLS':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
# elif color_space == 'YUV':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)
# elif color_space == 'YCrCb':
# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)
# else: feature_image = np.copy(img)
# # Use cv2.resize().ravel() to create the feature vector
# features = cv2.resize(feature_image, size).ravel()
# # Return the feature vector
# return features
|
flexible
|
{
"blob_id": "f178ae70ce54244624c2254d0d6256b83144db33",
"index": 5085,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\nif __name__ == '__main__':\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n",
"step-4": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = {'RGB': 'RGB', 'BGR': cv2.COLOR_BGR2RGB, 'HLS': cv2.\n COLOR_BGR2HLS, 'HSV': cv2.COLOR_BGR2HSV, 'LUV': cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV, 'YCrCb': cv2.COLOR_RGB2YCrCb}\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n return feature_vec\n\n\nif __name__ == '__main__':\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n",
"step-5": "import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\n\n\n# Define a function to compute color histogram features \n# Pass the color_space flag as 3-letter all caps string\n# like 'HSV' or 'LUV' etc.\n# KEEP IN MIND IF YOU DECIDE TO USE THIS FUNCTION LATER\n# IN YOUR PROJECT THAT IF YOU READ THE IMAGE WITH \n# cv2.imread() INSTEAD YOU START WITH BGR COLOR!\ndef bin_spatial(img, color_space='RGB', size=(32, 32)):\n colour_dict = { 'RGB':'RGB',\n 'BGR':cv2.COLOR_BGR2RGB,\n 'HLS':cv2.COLOR_BGR2HLS,\n 'HSV':cv2.COLOR_BGR2HSV,\n 'LUV':cv2.COLOR_BGR2LUV,\n 'YUV': cv2.COLOR_RGB2YUV,\n 'YCrCb': cv2.COLOR_RGB2YCrCb\n }\n \n # If someother Colour Space\n if color_space.upper() != 'RGB':\n method = colour_dict.get(color_space, 'RGB')\n img = cv2.cvtColor(img, method)\n else:\n img = np.copy(img)\n\n small_img = cv2.resize(img, size)\n feature_vec = small_img.ravel()\n # Return the feature vector\n return feature_vec\n\nif __name__ == \"__main__\": \n # You can also read cutout2, 3, 4 etc. to see other examples\n image = mpimg.imread('cutout1.jpg')\n feature_vec = bin_spatial(image, color_space='HSV', size=(32, 32))\n\n # Plot features\n plt.plot(feature_vec)\n plt.title('Spatially Binned Features')\n\n\n##\n## Solution\n##\n# Define a function to compute color histogram features \n# Pass the color_space flag as 3-letter all caps string\n# like 'HSV' or 'LUV' etc.\n# def bin_spatial(img, color_space='RGB', size=(32, 32)):\n# # Convert image to new color space (if specified)\n# if color_space != 'RGB':\n# if color_space == 'HSV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n# elif color_space == 'LUV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n# elif color_space == 'HLS':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n# elif color_space == 'YUV':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n# elif color_space == 'YCrCb':\n# feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n# else: feature_image = np.copy(img) \n# # Use cv2.resize().ravel() to create the feature vector\n# features = cv2.resize(feature_image, size).ravel() \n# # Return the feature vector\n# return features",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if len(obj_list):
for obj in obj_list:
obj.print_object()
print()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
obj_list = request_client.get_cross_margin_loan_orders()
if len(obj_list):
for obj in obj_list:
obj.print_object()
print()
<|reserved_special_token_1|>
from huobi import RequestClient
from huobi.constant.test import *
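# create an authenticated REST client; g_api_key and g_secret_key come from huobi.constant.test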
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
obj_list = request_client.get_cross_margin_loan_orders()
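# print every cross-margin loan order that came back, if any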
if len(obj_list):
for obj in obj_list:
obj.print_object()
print()
|
flexible
|
{
"blob_id": "c65969bba72142f4a328f978d78e0235cd56e393",
"index": 8618,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-3": "<mask token>\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)\nobj_list = request_client.get_cross_margin_loan_orders()\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-4": "from huobi import RequestClient\nfrom huobi.constant.test import *\nrequest_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)\nobj_list = request_client.get_cross_margin_loan_orders()\nif len(obj_list):\n for obj in obj_list:\n obj.print_object()\n print()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for d in doc:
word = d.text
pos = d.pos_
dep = d.dep_
if re.search('subj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
subj_array.append(word)
print(word2)
print(pos)
print(dep)
if re.search('obj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
obj_array.append(word)
print(word2)
print(pos)
print(dep)
subj_array.sort()
obj_array.sort()
for subj in subj_array:
print(subj)
for obj in obj_array:
print(obj)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
text = sys.argv[1]
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
ahref = '<a href="'
ahref2 = '"\\>'
subj_array = []
obj_array = []
for d in doc:
word = d.text
pos = d.pos_
dep = d.dep_
if re.search('subj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
subj_array.append(word)
print(word2)
print(pos)
print(dep)
if re.search('obj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
obj_array.append(word)
print(word2)
print(pos)
print(dep)
subj_array.sort()
obj_array.sort()
for subj in subj_array:
print(subj)
for obj in obj_array:
print(obj)
<|reserved_special_token_1|>
import sys
import spacy
import re
text = sys.argv[1]
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
ahref = '<a href="'
ahref2 = '"\\>'
subj_array = []
obj_array = []
for d in doc:
word = d.text
pos = d.pos_
dep = d.dep_
if re.search('subj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
subj_array.append(word)
print(word2)
print(pos)
print(dep)
if re.search('obj', dep):
word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +
word + ahref2 + word + '</a>')
obj_array.append(word)
print(word2)
print(pos)
print(dep)
subj_array.sort()
obj_array.sort()
for subj in subj_array:
print(subj)
for obj in obj_array:
print(obj)
<|reserved_special_token_1|>
#(C)Inspire Search 2020/5/31 Coded by Tsubasa Kato (@_stingraze)
#Last edited on 2020/6/1 11:36AM JST
import sys
import spacy
import re
#gets query from argv[1]
text = sys.argv[1]
nlp = spacy.load('en_core_web_sm')
doc = nlp(text)
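# each token in doc carries .pos_ (part of speech) and .dep_ (dependency label)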
ahref = "<a href=\""
ahref2 = "\"\>"
#arrays for storing subject and object types
subj_array = []
obj_array = []
for d in doc:
#print((d.text, d.pos_, d.dep_))
word = d.text
pos = d.pos_
dep = d.dep_
#If it matches subject, do this
if re.search(r'subj', dep):
#URL to SuperAI Search
word2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'
subj_array.append(word)
print (word2)
print (pos)
print (dep)
#If it matches object, do this
if re.search(r'obj', dep):
#URL to SuperAI Search
word2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'
obj_array.append(word)
print (word2)
print (pos)
print (dep)
#Sorts both arrays
#ToDo & Note to self:
#Study more of sorting so I can visualize this as table etc.
subj_array.sort()
obj_array.sort()
for subj in subj_array:
print (subj)
for obj in obj_array:
print (obj)
|
flexible
|
{
"blob_id": "ecc001394c1f3bba78559cba7eeb216dd3a942d8",
"index": 4711,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-3": "<mask token>\ntext = sys.argv[1]\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\nahref = '<a href=\"'\nahref2 = '\"\\\\>'\nsubj_array = []\nobj_array = []\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-4": "import sys\nimport spacy\nimport re\ntext = sys.argv[1]\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\nahref = '<a href=\"'\nahref2 = '\"\\\\>'\nsubj_array = []\nobj_array = []\nfor d in doc:\n word = d.text\n pos = d.pos_\n dep = d.dep_\n if re.search('subj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n subj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\n if re.search('obj', dep):\n word2 = (ahref + 'http://www.superai.online/solr/search.php?query=' +\n word + ahref2 + word + '</a>')\n obj_array.append(word)\n print(word2)\n print(pos)\n print(dep)\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n print(subj)\nfor obj in obj_array:\n print(obj)\n",
"step-5": "#(C)Inspire Search 2020/5/31 Coded by Tsubasa Kato (@_stingraze)\n#Last edited on 2020/6/1 11:36AM JST\nimport sys\nimport spacy\nimport re\n#gets query from argv[1]\ntext = sys.argv[1]\n\nnlp = spacy.load('en_core_web_sm')\ndoc = nlp(text)\n\nahref = \"<a href=\\\"\"\nahref2 = \"\\\"\\>\"\n\n#arrays for storing subject and object types \nsubj_array = []\nobj_array = []\n\nfor d in doc:\n\t#print((d.text, d.pos_, d.dep_))\n\tword = d.text\n\tpos = d.pos_\n\tdep = d.dep_\n#If it matches subject, do this\n\tif re.search(r'subj', dep):\n\t\t#URL to SuperAI Search\n\t\tword2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'\n\t\tsubj_array.append(word)\n\t\tprint (word2)\n\t\tprint (pos)\n\t\tprint (dep)\n\n#If it matches object, do this\n\tif re.search(r'obj', dep):\n\t\t#URL to SuperAI Search\n\t\tword2 = ahref + 'http://www.superai.online/solr/search.php?query='+ word + ahref2 + word + '</a>'\n\t\tobj_array.append(word)\n\t\tprint (word2)\n\t\tprint (pos)\n\t\tprint (dep)\n\n\n#Sorts both arrays\n#ToDo & Note to self: \n#Study more of sorting so I can visualize this as table etc.\nsubj_array.sort()\nobj_array.sort()\nfor subj in subj_array:\n\tprint (subj)\n\nfor obj in obj_array:\n\tprint (obj)\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import unittest
import time
from common import HTMLTestReport
class Get(unittest.TestCase):
TMPTOKEN = ''
TOKEN = ''
def setUp(self):
pass
    # Get a temporary token (opterTmpToken)
def test_gettmptoken(self):
url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'
params = {'sysID': '5'}
r = requests.get(url=url, params=params)
print(r.text)
opterTmpToken = r.json().get('data')['opterTmpToken']
Get.TMPTOKEN = opterTmpToken
print(opterTmpToken)
    # Exchange the temporary token for the real token (opterToken)
def test_gettoken(self):
url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'
params = {'opterTmpToken': Get.TMPTOKEN}
r = requests.get(url=url, params=params)
opterToken = r.json().get('data')['opterToken']
Get.TOKEN = opterToken
print(opterToken)
    # Get teacher qualification info and check the response message is Success
    def test_getQualificationInfoMessage(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken':Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['message'] == 'Success'
print(r.json())
    # Get teacher qualification info and verify the returned qualification details are correct
def test_getQualificationInfo(self):
url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'
para = {'opterToken': Get.TOKEN}
r = requests.get(url=url, params=para)
assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'
assert r.json()['data'][2]['certificate_url'] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'
assert r.json()['data'][2]['teacher_url'] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'
assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'
def tearDown(self):
pass
def Run():
suite = unittest.TestSuite()
    # tests execute in the order they are added to the suite
    suite.addTest(Get('test_gettmptoken'))
    suite.addTest(Get('test_gettoken'))
    suite.addTest(Get('test_getQualificationInfoMessage'))
    suite.addTest(Get('test_getQualificationInfo'))
    now = time.strftime("%Y-%m-%d_%H%M", time.localtime())
    filepath = './report/' + now + '.html'  # location where the test report is saved
fp = open(filepath, 'wb')
runner = HTMLTestReport.HTMLTestRunner(
stream=fp,
title='接口自动化测试报告',
tester='白雪'
)
runner.run(suite)
fp.close()
Run()
|
normal
|
{
"blob_id": "773c217f7f76bd82ed3dabf7ae1aba1871f0932f",
"index": 8539,
"step-1": "<mask token>\n\n\nclass Get(unittest.TestCase):\n <mask token>\n <mask token>\n\n def setUp(self):\n pass\n <mask token>\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Get(unittest.TestCase):\n <mask token>\n <mask token>\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\n<mask token>\n",
"step-4": "import requests\nimport unittest\nimport time\nfrom common import HTMLTestReport\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n\n def setUp(self):\n pass\n\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'\n ] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'\n ] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\n\ndef Run():\n suite = unittest.TestSuite()\n suite.addTest(Get('test_gettmptoken'))\n suite.addTest(Get('test_gettoken'))\n suite.addTest(Get('test_getQualificationInfo'))\n now = time.strftime('%Y-%m-%d_%H%M', time.localtime())\n filepath = './report/' + now + '.html'\n fp = open(filepath, 'wb')\n runner = HTMLTestReport.HTMLTestRunner(stream=fp, title='接口自动化测试报告',\n tester='白雪')\n runner.run(suite)\n fp.close()\n\n\nRun()\n",
"step-5": "import requests\nimport unittest\nimport time\nfrom common import HTMLTestReport\n\n\nclass Get(unittest.TestCase):\n TMPTOKEN = ''\n TOKEN = ''\n def setUp(self):\n pass\n\n # 获取临时token,opterTmpToken\n def test_gettmptoken(self):\n url = 'https://jdapi.jd100.com/uc/core/v1/sys/opterTmpToken'\n params = {'sysID': '5'}\n r = requests.get(url=url, params=params)\n print(r.text)\n opterTmpToken = r.json().get('data')['opterTmpToken']\n Get.TMPTOKEN = opterTmpToken\n print(opterTmpToken)\n\n # 获取正式token,opterToken\n def test_gettoken(self):\n url = 'https://jdapi.jd100.com/uc/v1/sys/opterToken'\n params = {'opterTmpToken': Get.TMPTOKEN}\n r = requests.get(url=url, params=params)\n opterToken = r.json().get('data')['opterToken']\n Get.TOKEN = opterToken\n print(opterToken)\n\n #获取教师资质信息,校验结果是否返回success\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken':Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['message'] == 'Success'\n print(r.json())\n\n # 获取教师资质信息,校验接口返回的老师资质相关信息是否正确\n def test_getQualificationInfo(self):\n url = 'https://jdapi.jd100.com/coursemgr/v1/getQualificationInfo'\n para = {'opterToken': Get.TOKEN}\n r = requests.get(url=url, params=para)\n assert r.json()['data'][2]['teacher_name'] == '测试勿扰老师'\n assert r.json()['data'][2]['certificate_url'] == 'https://jdspace.jd100.com/teachers/5c5f5d11-13f2-4ce0-8959-5e2ab23f22be.jpg'\n assert r.json()['data'][2]['teacher_url'] == 'https://jdspace.jd100.com/teachers/be6195dc-5f78-4661-b4dd-6ac709994498.jpg'\n assert r.json()['data'][2]['teacher_certificate'] == '111111111111111'\n\n def tearDown(self):\n pass\n\ndef Run():\n suite = unittest.TestSuite()\n # 执行顺序是安装加载顺序:先执行test_case2,再执行test_case1\n suite.addTest(Get('test_gettmptoken'))\n suite.addTest(Get('test_gettoken'))\n suite.addTest(Get('test_getQualificationInfo'))\n now = time.strftime(\"%Y-%m-%d_%H%M\", time.localtime())\n filepath = './report/' + now + '.html' # 测试报告存放的位置\n fp = open(filepath, 'wb')\n runner = HTMLTestReport.HTMLTestRunner(\n stream=fp,\n title='接口自动化测试报告',\n tester='白雪'\n )\n runner.run(suite)\n fp.close()\n\nRun()",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
from django.contrib import admin
from students.models import Child_detail
class ChildAdmin(admin.ModelAdmin):
def queryset(self, request):
"""
Filter the Child objects to only
display those for the currently signed in user.
"""
qs = super(ChildAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
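        # everyone else only sees records tied to their associated block, school, or district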
if request.user.user_category == 'block':
return qs.filter(block=request.user.account.associated_with)
if request.user.user_category == 'school':
return qs.filter(school=request.user.account.associated_with)
if request.user.user_category == 'district':
return qs.filter(district=request.user.account.associated_with)
# Register your models here.
admin.site.register(Child_detail,ChildAdmin)
|
normal
|
{
"blob_id": "582f2e6972bad85c2aaedd248f050f708c61973b",
"index": 2332,
"step-1": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\nadmin.site.register(Child_detail, ChildAdmin)\n",
"step-4": "from django.contrib import admin\nfrom students.models import Child_detail\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\nadmin.site.register(Child_detail, ChildAdmin)\n",
"step-5": "from django.contrib import admin\nfrom students.models import Child_detail\nclass ChildAdmin(admin.ModelAdmin):\n\t\n\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n \treturn qs\n if request.user.user_category == 'block':\n \treturn qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n \treturn qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n \treturn qs.filter(district=request.user.account.associated_with)\n # Register your models here.\n\nadmin.site.register(Child_detail,ChildAdmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python
from xrouter import api
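# delegate straight to xrouter's API entry point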
api.main()
|
normal
|
{
"blob_id": "64368679aa2e387e25a36b2f3d0312a99b819e95",
"index": 2147,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napi.main()\n",
"step-3": "from xrouter import api\napi.main()\n",
"step-4": "#!/usr/bin/env python\nfrom xrouter import api\napi.main()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
<|reserved_special_token_0|>
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
doc = lxml.html.parse(urllib2.urlopen(
'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'
))
root = doc.getroot()
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
<|reserved_special_token_0|>
doc = lxml.html.parse(urllib2.urlopen(
'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'
))
root = doc.getroot()
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
<|reserved_special_token_1|>
import scraperwiki, lxml.html, urllib2, re
from datetime import datetime
doc = lxml.html.parse(urllib2.urlopen(
'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'
))
root = doc.getroot()
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
import scraperwiki, lxml.html, urllib2, re
from datetime import datetime
doc = lxml.html.parse(urllib2.urlopen(
'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'
))
root = doc.getroot()
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {'conviction_date': datetime.strptime(re.match(
'(\\d+/\\d+/\\d+)', tr[0].text_content().strip()).group(1),
'%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(), 'convicted_name':
tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(
), 'pdf': tr[5].xpath('.//a')[0].get('href')}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
<|reserved_special_token_1|>
import scraperwiki, lxml.html, urllib2, re
from datetime import datetime
#html = scraperwiki.scrape("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm")
doc = lxml.html.parse(urllib2.urlopen("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm"))
root = doc.getroot()
#select the table that contains the offenders, ignoring the first one that contains the header row
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {
'conviction_date': datetime.strptime(
re.match("(\d+/\d+/\d+)", tr[0].text_content().strip()).group(1),
"%d/%m/%Y"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)
'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(),
'convicted_name': tr[3].text_content().strip(),
'agency': tr[4].text_content().strip(),
'pdf': tr[5].xpath(".//a")[0].get("href")
}
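    # keying on the PDF link lets re-runs update existing rows instead of duplicating them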
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
import scraperwiki, lxml.html, urllib2, re
from datetime import datetime
#html = scraperwiki.scrape("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm")
doc = lxml.html.parse(urllib2.urlopen("http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm"))
root = doc.getroot()
#select the table that contains the offenders, ignoring the first one that contains the header row
for tr in root.xpath("//div[@id='verdiSection10']/div/div/table/tbody/tr")[1:]:
data = {
'conviction_date': datetime.strptime(
re.match("(\d+/\d+/\d+)", tr[0].text_content().strip()).group(1),
"%d/%m/%Y"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)
'business_name': tr[1].text_content().strip(),
'business_address': tr[2].text_content().strip(),
'convicted_name': tr[3].text_content().strip(),
'agency': tr[4].text_content().strip(),
'pdf': tr[5].xpath(".//a")[0].get("href")
}
scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)
|
flexible
|
{
"blob_id": "e870900249b121f2416d7be543752ebf6392b6be",
"index": 6868,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n<mask token>\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-3": "<mask token>\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n<mask token>\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-4": "import scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\nimport scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\ndoc = lxml.html.parse(urllib2.urlopen(\n 'http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm'\n ))\nroot = doc.getroot()\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {'conviction_date': datetime.strptime(re.match(\n '(\\\\d+/\\\\d+/\\\\d+)', tr[0].text_content().strip()).group(1),\n '%d/%m/%Y'), 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(), 'convicted_name':\n tr[3].text_content().strip(), 'agency': tr[4].text_content().strip(\n ), 'pdf': tr[5].xpath('.//a')[0].get('href')}\n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-5": "import scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\n\n#html = scraperwiki.scrape(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\")\ndoc = lxml.html.parse(urllib2.urlopen(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\"))\nroot = doc.getroot()\n\n#select the table that contains the offenders, ignoring the first one that contains the header row\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {\n 'conviction_date': datetime.strptime(\n re.match(\"(\\d+/\\d+/\\d+)\", tr[0].text_content().strip()).group(1),\n \"%d/%m/%Y\"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)\n 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(),\n 'convicted_name': tr[3].text_content().strip(),\n 'agency': tr[4].text_content().strip(),\n 'pdf': tr[5].xpath(\".//a\")[0].get(\"href\")\n }\n \n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\nimport scraperwiki, lxml.html, urllib2, re\nfrom datetime import datetime\n\n#html = scraperwiki.scrape(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\")\ndoc = lxml.html.parse(urllib2.urlopen(\"http://www.public.health.wa.gov.au/2/1035/2/publication_of_names_of_offenders_list.pm\"))\nroot = doc.getroot()\n\n#select the table that contains the offenders, ignoring the first one that contains the header row\nfor tr in root.xpath(\"//div[@id='verdiSection10']/div/div/table/tbody/tr\")[1:]:\n data = {\n 'conviction_date': datetime.strptime(\n re.match(\"(\\d+/\\d+/\\d+)\", tr[0].text_content().strip()).group(1),\n \"%d/%m/%Y\"), #sometimes they include two dates in the entry, so we'll have to grab the first (damnit)\n 'business_name': tr[1].text_content().strip(),\n 'business_address': tr[2].text_content().strip(),\n 'convicted_name': tr[3].text_content().strip(),\n 'agency': tr[4].text_content().strip(),\n 'pdf': tr[5].xpath(\".//a\")[0].get(\"href\")\n }\n \n scraperwiki.sqlite.save(unique_keys=['pdf'], data=data)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
good_car = UnreliableCar('good car', 100, 80)
bad_car = UnreliableCar('bad car', 100, 10)
for i in range(10):
print('try to drive {} km'.format(i))
print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))
print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
good_car = UnreliableCar('good car', 100, 80)
bad_car = UnreliableCar('bad car', 100, 10)
for i in range(10):
print('try to drive {} km'.format(i))
print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))
print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from unreliable_car import UnreliableCar
def main():
good_car = UnreliableCar('good car', 100, 80)
bad_car = UnreliableCar('bad car', 100, 10)
for i in range(10):
print('try to drive {} km'.format(i))
print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))
print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
CP1404 Practical
unreliable car test
"""
from unreliable_car import UnreliableCar
def main():
good_car = UnreliableCar("good car", 100, 80)
bad_car = UnreliableCar("bad car", 100, 10)
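    # third argument is presumably the reliability percentage: 80% vs 10% of drive attempts succeed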
for i in range(10):
print("try to drive {} km".format(i))
print("{:10} drove {:2}km".format(good_car.name, good_car.drive(i)))
print("{:10} drove {:2}km".format(bad_car.name, bad_car.drive(i)))
print(good_car)
print(bad_car)
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "f29ad02f3781c7a7d2a1f0c97626dd5c7ea2417e",
"index": 7867,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar('good car', 100, 80)\n bad_car = UnreliableCar('bad car', 100, 10)\n for i in range(10):\n print('try to drive {} km'.format(i))\n print('{:10} drove {:2}km'.format(good_car.name, good_car.drive(i)))\n print('{:10} drove {:2}km'.format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nCP1404 Practical\nunreliable car test\n\"\"\"\nfrom unreliable_car import UnreliableCar\n\n\ndef main():\n good_car = UnreliableCar(\"good car\", 100, 80)\n bad_car = UnreliableCar(\"bad car\", 100, 10)\n\n for i in range(10):\n print(\"try to drive {} km\".format(i))\n print(\"{:10} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:10} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n print(good_car)\n print(bad_car)\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def func(w, rc):
return 1 / np.sqrt(1 + w ** 2 * rc ** 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(w, rc):
return 1 / np.sqrt(1 + w ** 2 * rc ** 2)
with open('data/phase.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header_row = next(reader)
f, U, a, b = [], [], [], []
for row in reader:
f.append(row[0])
U.append(row[1])
a.append(row[2])
b.append(row[3])
f = np.array(f, dtype=float)
U = np.array(U, dtype=float)
a = np.array(a, dtype=float)
b = np.array(b, dtype=float)
<|reserved_special_token_0|>
plt.xlabel('$f\\, / \\, Hz$')
plt.ylabel('$\\frac{U_c}{U_0}$', fontsize=15)
plt.grid()
plt.semilogx(f, U / U0, 'rx', label='Messwerte')
<|reserved_special_token_0|>
plt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')
plt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')
plt.legend()
plt.savefig('plotb.pdf')
plt.show()
<|reserved_special_token_0|>
print('RC =', -a1, '+-', uncertainties[0])
print('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)
print('Phase:', a / b * np.pi * 2)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def func(w, rc):
return 1 / np.sqrt(1 + w ** 2 * rc ** 2)
with open('data/phase.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header_row = next(reader)
f, U, a, b = [], [], [], []
for row in reader:
f.append(row[0])
U.append(row[1])
a.append(row[2])
b.append(row[3])
f = np.array(f, dtype=float)
U = np.array(U, dtype=float)
a = np.array(a, dtype=float)
b = np.array(b, dtype=float)
U0 = 0.6
popt, pcov = curve_fit(func, f * 2 * np.pi, U / U0)
a1 = popt[0]
R_th = 11.01 * 10 ** 3
C_th = 93.3 * 10 ** -9
plt.xlabel('$f\\, / \\, Hz$')
plt.ylabel('$\\frac{U_c}{U_0}$', fontsize=15)
plt.grid()
plt.semilogx(f, U / U0, 'rx', label='Messwerte')
x = np.linspace(20, 30000, 10000)
plt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')
plt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')
plt.legend()
plt.savefig('plotb.pdf')
plt.show()
uncertainties = np.sqrt(np.diag(pcov))
print('RC =', -a1, '+-', uncertainties[0])
print('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)
print('Phase:', a / b * np.pi * 2)
<|reserved_special_token_1|>
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
def func(w, rc):
return 1 / np.sqrt(1 + w ** 2 * rc ** 2)
with open('data/phase.csv') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
header_row = next(reader)
f, U, a, b = [], [], [], []
for row in reader:
f.append(row[0])
U.append(row[1])
a.append(row[2])
b.append(row[3])
f = np.array(f, dtype=float)
U = np.array(U, dtype=float)
a = np.array(a, dtype=float)
b = np.array(b, dtype=float)
U0 = 0.6
popt, pcov = curve_fit(func, f * 2 * np.pi, U / U0)
a1 = popt[0]
R_th = 11.01 * 10 ** 3
C_th = 93.3 * 10 ** -9
plt.xlabel('$f\\, / \\, Hz$')
plt.ylabel('$\\frac{U_c}{U_0}$', fontsize=15)
plt.grid()
plt.semilogx(f, U / U0, 'rx', label='Messwerte')
x = np.linspace(20, 30000, 10000)
plt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')
plt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')
plt.legend()
plt.savefig('plotb.pdf')
plt.show()
uncertainties = np.sqrt(np.diag(pcov))
print('RC =', -a1, '+-', uncertainties[0])
print('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)
print('Phase:', a / b * np.pi * 2)
<|reserved_special_token_1|>
import csv
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
# fit function: amplitude response of an RC low-pass
def func(w,rc):
return 1/(np.sqrt(1+w**2*rc**2))
# read in the measurement data
with open('data/phase.csv' ) as csvfile:
reader=csv.reader(csvfile, delimiter=',')
header_row=next(reader)
f, U, a, b = [], [], [], []
for row in reader:
f.append(row[0])
U.append(row[1])
a.append(row[2])
b.append(row[3])
f=np.array(f,dtype=float)
U=np.array(U,dtype=float)
a=np.array(a,dtype=float)
b=np.array(b,dtype=float)
# least-squares fit for the RC time constant
U0=0.6
popt, pcov = curve_fit(func, f*2*np.pi, U/U0)
a1=popt[0]
# theoretical component values
R_th=11.01*10**3
C_th=93.3*10**(-9)
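# expected time constant R_th*C_th is roughly 1.03 ms (cutoff frequency around 155 Hz)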
#plots
plt.xlabel(r'$f\, / \, Hz$')
plt.ylabel(r'$\frac{U_c}{U_0}$', fontsize=15)
plt.grid()
plt.semilogx(f,U/U0,'rx',label='Messwerte')
x=np.linspace(20,30000,10000)
plt.semilogx(x,func(x*2*np.pi,a1),'b-',label='Ausgleichsrechnung')
plt.semilogx(x,func(x*2*np.pi,R_th*C_th),'g-',label='Theoriekurve')
plt.legend()
plt.savefig('plotb.pdf')
plt.show()
# print fit result and its uncertainty
uncertainties = np.sqrt(np.diag(pcov))
print('RC =',-a1,'+-',uncertainties[0])
print('Theoriewert:',11.01*1000*93.3*10**(-9))
print('Phase:',(a/b)*np.pi*2)
|
flexible
|
{
"blob_id": "170d0560c40f3f642f319f6113b68ab8a6bea9ef",
"index": 468,
"step-1": "<mask token>\n\n\ndef func(w, rc):\n return 1 / np.sqrt(1 + w ** 2 * rc ** 2)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func(w, rc):\n return 1 / np.sqrt(1 + w ** 2 * rc ** 2)\n\n\nwith open('data/phase.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n header_row = next(reader)\n f, U, a, b = [], [], [], []\n for row in reader:\n f.append(row[0])\n U.append(row[1])\n a.append(row[2])\n b.append(row[3])\n f = np.array(f, dtype=float)\n U = np.array(U, dtype=float)\n a = np.array(a, dtype=float)\n b = np.array(b, dtype=float)\n<mask token>\nplt.xlabel('$f\\\\, / \\\\, Hz$')\nplt.ylabel('$\\\\frac{U_c}{U_0}$', fontsize=15)\nplt.grid()\nplt.semilogx(f, U / U0, 'rx', label='Messwerte')\n<mask token>\nplt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')\nplt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')\nplt.legend()\nplt.savefig('plotb.pdf')\nplt.show()\n<mask token>\nprint('RC =', -a1, '+-', uncertainties[0])\nprint('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)\nprint('Phase:', a / b * np.pi * 2)\n",
"step-3": "<mask token>\n\n\ndef func(w, rc):\n return 1 / np.sqrt(1 + w ** 2 * rc ** 2)\n\n\nwith open('data/phase.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n header_row = next(reader)\n f, U, a, b = [], [], [], []\n for row in reader:\n f.append(row[0])\n U.append(row[1])\n a.append(row[2])\n b.append(row[3])\n f = np.array(f, dtype=float)\n U = np.array(U, dtype=float)\n a = np.array(a, dtype=float)\n b = np.array(b, dtype=float)\nU0 = 0.6\npopt, pcov = curve_fit(func, f * 2 * np.pi, U / U0)\na1 = popt[0]\nR_th = 11.01 * 10 ** 3\nC_th = 93.3 * 10 ** -9\nplt.xlabel('$f\\\\, / \\\\, Hz$')\nplt.ylabel('$\\\\frac{U_c}{U_0}$', fontsize=15)\nplt.grid()\nplt.semilogx(f, U / U0, 'rx', label='Messwerte')\nx = np.linspace(20, 30000, 10000)\nplt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')\nplt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')\nplt.legend()\nplt.savefig('plotb.pdf')\nplt.show()\nuncertainties = np.sqrt(np.diag(pcov))\nprint('RC =', -a1, '+-', uncertainties[0])\nprint('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)\nprint('Phase:', a / b * np.pi * 2)\n",
"step-4": "import csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\n\ndef func(w, rc):\n return 1 / np.sqrt(1 + w ** 2 * rc ** 2)\n\n\nwith open('data/phase.csv') as csvfile:\n reader = csv.reader(csvfile, delimiter=',')\n header_row = next(reader)\n f, U, a, b = [], [], [], []\n for row in reader:\n f.append(row[0])\n U.append(row[1])\n a.append(row[2])\n b.append(row[3])\n f = np.array(f, dtype=float)\n U = np.array(U, dtype=float)\n a = np.array(a, dtype=float)\n b = np.array(b, dtype=float)\nU0 = 0.6\npopt, pcov = curve_fit(func, f * 2 * np.pi, U / U0)\na1 = popt[0]\nR_th = 11.01 * 10 ** 3\nC_th = 93.3 * 10 ** -9\nplt.xlabel('$f\\\\, / \\\\, Hz$')\nplt.ylabel('$\\\\frac{U_c}{U_0}$', fontsize=15)\nplt.grid()\nplt.semilogx(f, U / U0, 'rx', label='Messwerte')\nx = np.linspace(20, 30000, 10000)\nplt.semilogx(x, func(x * 2 * np.pi, a1), 'b-', label='Ausgleichsrechnung')\nplt.semilogx(x, func(x * 2 * np.pi, R_th * C_th), 'g-', label='Theoriekurve')\nplt.legend()\nplt.savefig('plotb.pdf')\nplt.show()\nuncertainties = np.sqrt(np.diag(pcov))\nprint('RC =', -a1, '+-', uncertainties[0])\nprint('Theoriewert:', 11.01 * 1000 * 93.3 * 10 ** -9)\nprint('Phase:', a / b * np.pi * 2)\n",
"step-5": "import csv\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom scipy.optimize import curve_fit\r\n\r\n#funktion\r\ndef func(w,rc):\r\n return 1/(np.sqrt(1+w**2*rc**2))\r\n\r\n#daten einlesen\r\nwith open('data/phase.csv' ) as csvfile:\r\n reader=csv.reader(csvfile, delimiter=',')\r\n header_row=next(reader)\r\n f, U, a, b = [], [], [], []\r\n for row in reader:\r\n f.append(row[0])\r\n U.append(row[1])\r\n a.append(row[2])\r\n b.append(row[3])\r\n f=np.array(f,dtype=float)\r\n U=np.array(U,dtype=float)\r\n a=np.array(a,dtype=float)\r\n b=np.array(b,dtype=float)\r\n\r\n#curvefit\r\nU0=0.6\r\npopt, pcov = curve_fit(func, f*2*np.pi, U/U0)\r\na1=popt[0]\r\n\r\n#theoriewerte\r\nR_th=11.01*10**3\r\nC_th=93.3*10**(-9)\r\n\r\n#plots\r\nplt.xlabel(r'$f\\, / \\, Hz$')\r\nplt.ylabel(r'$\\frac{U_c}{U_0}$', fontsize=15)\r\nplt.grid()\r\nplt.semilogx(f,U/U0,'rx',label='Messwerte')\r\nx=np.linspace(20,30000,10000)\r\nplt.semilogx(x,func(x*2*np.pi,a1),'b-',label='Ausgleichsrechnung')\r\nplt.semilogx(x,func(x*2*np.pi,R_th*C_th),'g-',label='Theoriekurve')\r\nplt.legend()\r\nplt.savefig('plotb.pdf')\r\nplt.show()\r\n\r\n#fehlerausgabe\r\nuncertainties = np.sqrt(np.diag(pcov))\r\nprint('RC =',-a1,'+-',uncertainties[0])\r\nprint('Theoriewert:',11.01*1000*93.3*10**(-9))\r\nprint('Phase:',(a/b)*np.pi*2)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class MyGame(arcade.Window):
<|reserved_special_token_0|>
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
<|reserved_special_token_0|>
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyGame(arcade.Window):
<|reserved_special_token_0|>
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import arcade
import os
from Toad_arcade import Toad
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = 'PyToads - Battletoads reimplementation'
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
self.player_list = None
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
self.player_list.append(self.player)
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
arcade.start_render()
self.player_list.draw()
output = f'Score: {self.score}'
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
"""
Platformer Game
"""
import arcade
import os
from Toad_arcade import Toad
# Constants
SCREEN_WIDTH = 1920
SCREEN_HEIGHT = 1080
SCREEN_TITLE = "PyToads - Battletoads reimplementation"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
""" Set up the game and initialize the variables. """
# Sprite lists
self.player_list = None
# Set up the player
self.score = 0
self.player = None
def setup(self):
self.player_list = arcade.SpriteList()
# Set up the player
self.score = 0
self.player = Toad()
self.player.center_x = SCREEN_WIDTH // 2
self.player.center_y = SCREEN_HEIGHT // 2
#self.player.scale = 0.8
self.player_list.append(self.player)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
# Draw all the sprites.
self.player_list.draw()
# Put the text on the screen.
output = f"Score: {self.score}"
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def on_key_press(self, key, modifiers):
"""
Called whenever a key is pressed.
"""
if key == arcade.key.UP:
self.player.change_y = MOVEMENT_SPEED
elif key == arcade.key.DOWN:
self.player.change_y = -MOVEMENT_SPEED
elif key == arcade.key.LEFT:
self.player.change_x = -MOVEMENT_SPEED
elif key == arcade.key.RIGHT:
self.player.change_x = MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""
Called when the user releases a key.
"""
if key == arcade.key.UP or key == arcade.key.DOWN:
self.player.change_y = 0
elif key == arcade.key.LEFT or key == arcade.key.RIGHT:
self.player.change_x = 0
def on_update(self, delta_time):
""" Movement and game logic """
self.player_list.update()
self.player_list.update_animation()
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "28d8f9d9b39c40c43a362e57a7907c0a38a6bd05",
"index": 748,
"step-1": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n <mask token>\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass MyGame(arcade.Window):\n <mask token>\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport arcade\nimport os\nfrom Toad_arcade import Toad\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = 'PyToads - Battletoads reimplementation'\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n \"\"\" Set up the game and initialize the variables. \"\"\"\n self.player_list = None\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n self.score = 0\n self.player = Toad()\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n self.player_list.append(self.player)\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n arcade.start_render()\n self.player_list.draw()\n output = f'Score: {self.score}'\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nPlatformer Game\n\"\"\"\nimport arcade\nimport os\nfrom Toad_arcade import Toad\n# Constants\nSCREEN_WIDTH = 1920\nSCREEN_HEIGHT = 1080\nSCREEN_TITLE = \"PyToads - Battletoads reimplementation\"\n\n# Constants used to scale our sprites from their original size\nCHARACTER_SCALING = 1\nTILE_SCALING = 0.5\nCOIN_SCALING = 0.5\nMOVEMENT_SPEED = 5\n\nclass MyGame(arcade.Window):\n \"\"\" Main application class. \"\"\"\n\n def __init__(self, width, height, title):\n \"\"\"\n Initializer\n \"\"\"\n super().__init__(width, height, title)\n\n # Set the working directory (where we expect to find files) to the same\n # directory this .py file is in. You can leave this out of your own\n # code, but it is needed to easily run the examples using \"python -m\"\n # as mentioned at the top of this program.\n file_path = os.path.dirname(os.path.abspath(__file__))\n os.chdir(file_path)\n\n \"\"\" Set up the game and initialize the variables. \"\"\"\n\n # Sprite lists\n self.player_list = None\n\n # Set up the player\n self.score = 0\n self.player = None\n\n def setup(self):\n self.player_list = arcade.SpriteList()\n # Set up the player\n self.score = 0\n self.player = Toad()\n\n self.player.center_x = SCREEN_WIDTH // 2\n self.player.center_y = SCREEN_HEIGHT // 2\n #self.player.scale = 0.8\n\n self.player_list.append(self.player)\n # Set the background color\n arcade.set_background_color(arcade.color.AMAZON)\n\n def on_draw(self):\n \"\"\"\n Render the screen.\n \"\"\"\n # This command has to happen before we start drawing\n arcade.start_render()\n\n # Draw all the sprites.\n self.player_list.draw()\n\n # Put the text on the screen.\n output = f\"Score: {self.score}\"\n arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)\n\n def on_key_press(self, key, modifiers):\n \"\"\"\n Called whenever a key is pressed.\n \"\"\"\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED\n\n def on_key_release(self, key, modifiers):\n \"\"\"\n Called when the user releases a key.\n \"\"\"\n if key == arcade.key.UP or key == arcade.key.DOWN:\n self.player.change_y = 0\n elif key == arcade.key.LEFT or key == arcade.key.RIGHT:\n self.player.change_x = 0\n\n def on_update(self, delta_time):\n \"\"\" Movement and game logic \"\"\"\n\n self.player_list.update()\n self.player_list.update_animation()\n\n\ndef main():\n \"\"\" Main method \"\"\"\n window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n window.setup()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()",
"step-ids": [
6,
7,
9,
12,
13
]
}
|
[
6,
7,
9,
12,
13
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('viajes', '0001_initial')]
operations = [migrations.AlterModelOptions(name='viajes', options={
'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})
]
<|reserved_special_token_1|>
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [('viajes', '0001_initial')]
operations = [migrations.AlterModelOptions(name='viajes', options={
'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})
]
<|reserved_special_token_1|>
# Generated by Django 2.2 on 2020-10-26 15:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('viajes', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='viajes',
options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},
),
]
|
flexible
|
{
"blob_id": "760a5a168575a0ea12b93cb58c1e81e313704e35",
"index": 6276,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-4": "from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('viajes', '0001_initial')]\n operations = [migrations.AlterModelOptions(name='viajes', options={\n 'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'})\n ]\n",
"step-5": "# Generated by Django 2.2 on 2020-10-26 15:16\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('viajes', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='viajes',\n options={'verbose_name': 'Movilización', 'verbose_name_plural': 'Movilización'},\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')
parser.add_argument('--batch', '-b', help=
'Run on a batch (SLURM) system', action='store_true', default=False)
args = parser.parse_args()
campaign = uq.CampaignDask(name='Conduction.')
print(f'Running in {campaign.campaign_dir}')
encoder = boutvecma.BOUTEncoder(template_input=
'models/conduction/data/BOUT.inp')
decoder = boutvecma.BOUTDecoder(variables=['T'])
params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,
'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max':
1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min':
0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':
'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}
campaign.add_app('1D_conduction', params=params, encoder=encoder,
decoder=decoder)
vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy
.Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),
'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
campaign.set_sampler(sampler)
campaign.draw_samples()
run_dirs = campaign.populate_runs_dir()
print(f'Created run directories: {run_dirs}')
if args.batch:
cluster = SLURMCluster(job_extra=['--job-name=VVUQ',
'--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',
processes=1, walltime='00:10:00', interface='ib0')
cluster.scale(16)
print(f'Job script:\n{cluster.job_script()}')
client = Client(cluster)
else:
client = Client(processes=True, threads_per_worker=1)
print(client)
time_start = time.time()
campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath
('build/models/conduction/conduction -q -q -q -d .')), client)
client.close()
time_end = time.time()
print(f'Finished, took {time_end - time_start}')
campaign.collate()
campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,
qoi_cols=['T']))
results = campaign.get_last_analysis()
state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')
campaign.save_state(state_filename)
plt.figure()
results.plot_moments('T', xlabel='$\\rho$', filename=
f'{campaign.campaign_dir}/moments.png')
plt.figure()
results.plot_sobols_first('T', xlabel='$\\rho$', filename=
f'{campaign.campaign_dir}/sobols_first.png')
<|reserved_special_token_1|>
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import matplotlib.pyplot as plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')
parser.add_argument('--batch', '-b', help=
'Run on a batch (SLURM) system', action='store_true', default=False)
args = parser.parse_args()
campaign = uq.CampaignDask(name='Conduction.')
print(f'Running in {campaign.campaign_dir}')
encoder = boutvecma.BOUTEncoder(template_input=
'models/conduction/data/BOUT.inp')
decoder = boutvecma.BOUTDecoder(variables=['T'])
params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,
'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max':
1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min':
0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':
'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}
campaign.add_app('1D_conduction', params=params, encoder=encoder,
decoder=decoder)
vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy
.Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),
'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
campaign.set_sampler(sampler)
campaign.draw_samples()
run_dirs = campaign.populate_runs_dir()
print(f'Created run directories: {run_dirs}')
if args.batch:
cluster = SLURMCluster(job_extra=['--job-name=VVUQ',
'--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',
processes=1, walltime='00:10:00', interface='ib0')
cluster.scale(16)
print(f'Job script:\n{cluster.job_script()}')
client = Client(cluster)
else:
client = Client(processes=True, threads_per_worker=1)
print(client)
time_start = time.time()
campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath
('build/models/conduction/conduction -q -q -q -d .')), client)
client.close()
time_end = time.time()
print(f'Finished, took {time_end - time_start}')
campaign.collate()
campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,
qoi_cols=['T']))
results = campaign.get_last_analysis()
state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')
campaign.save_state(state_filename)
plt.figure()
results.plot_moments('T', xlabel='$\\rho$', filename=
f'{campaign.campaign_dir}/moments.png')
plt.figure()
results.plot_sobols_first('T', xlabel='$\\rho$', filename=
f'{campaign.campaign_dir}/sobols_first.png')
<|reserved_special_token_1|>
#!/usr/bin/env python3
import argparse
import boutvecma
import easyvvuq as uq
import chaospy
import os
import numpy as np
import time
from dask.distributed import Client
from dask_jobqueue import SLURMCluster
import matplotlib.pyplot as plt
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="EasyVVUQ applied to BOUT++")
parser.add_argument(
"--batch",
"-b",
help="Run on a batch (SLURM) system",
action="store_true",
default=False,
)
args = parser.parse_args()
campaign = uq.CampaignDask(name="Conduction.")
print(f"Running in {campaign.campaign_dir}")
encoder = boutvecma.BOUTEncoder(template_input="models/conduction/data/BOUT.inp")
decoder = boutvecma.BOUTDecoder(variables=["T"])
params = {
"conduction:chi": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:scale": {"type": "float", "min": 0.0, "max": 1e3, "default": 1.0},
"T:gauss_width": {"type": "float", "min": 0.0, "max": 1e3, "default": 0.2},
"T:gauss_centre": {
"type": "float",
"min": 0.0,
"max": 2 * np.pi,
"default": np.pi,
},
}
campaign.add_app("1D_conduction", params=params, encoder=encoder, decoder=decoder)
vary = {
"conduction:chi": chaospy.Uniform(0.2, 4.0),
"T:scale": chaospy.Uniform(0.5, 1.5),
"T:gauss_width": chaospy.Uniform(0.01, 0.4),
"T:gauss_centre": chaospy.Uniform(0.0, 2 * np.pi),
}
sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)
campaign.set_sampler(sampler)
campaign.draw_samples()
run_dirs = campaign.populate_runs_dir()
print(f"Created run directories: {run_dirs}")
if args.batch:
# Example of use on Viking
cluster = SLURMCluster(
job_extra=[
"--job-name=VVUQ",
"--account=PHYS-YPIRSE-2019",
],
cores=1,
memory="1 GB",
processes=1,
walltime="00:10:00",
interface="ib0",
)
cluster.scale(16)
print(f"Job script:\n{cluster.job_script()}")
client = Client(cluster)
else:
client = Client(processes=True, threads_per_worker=1)
print(client)
time_start = time.time()
campaign.apply_for_each_run_dir(
uq.actions.ExecuteLocal(
os.path.abspath("build/models/conduction/conduction -q -q -q -d .")
),
client,
)
client.close()
time_end = time.time()
print(f"Finished, took {time_end - time_start}")
campaign.collate()
campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=["T"]))
results = campaign.get_last_analysis()
state_filename = os.path.join(campaign.campaign_dir, "campaign_state.json")
campaign.save_state(state_filename)
plt.figure()
results.plot_moments(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/moments.png"
)
plt.figure()
results.plot_sobols_first(
"T", xlabel=r"$\rho$", filename=f"{campaign.campaign_dir}/sobols_first.png"
)
|
flexible
|
{
"blob_id": "416f4c6bbd2f2b9562ab2d1477df4ebc45070d8d",
"index": 5060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-3": "import argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='EasyVVUQ applied to BOUT++')\n parser.add_argument('--batch', '-b', help=\n 'Run on a batch (SLURM) system', action='store_true', default=False)\n args = parser.parse_args()\n campaign = uq.CampaignDask(name='Conduction.')\n print(f'Running in {campaign.campaign_dir}')\n encoder = boutvecma.BOUTEncoder(template_input=\n 'models/conduction/data/BOUT.inp')\n decoder = boutvecma.BOUTDecoder(variables=['T'])\n params = {'conduction:chi': {'type': 'float', 'min': 0.0, 'max': 1000.0,\n 'default': 1.0}, 'T:scale': {'type': 'float', 'min': 0.0, 'max': \n 1000.0, 'default': 1.0}, 'T:gauss_width': {'type': 'float', 'min': \n 0.0, 'max': 1000.0, 'default': 0.2}, 'T:gauss_centre': {'type':\n 'float', 'min': 0.0, 'max': 2 * np.pi, 'default': np.pi}}\n campaign.add_app('1D_conduction', params=params, encoder=encoder,\n decoder=decoder)\n vary = {'conduction:chi': chaospy.Uniform(0.2, 4.0), 'T:scale': chaospy\n .Uniform(0.5, 1.5), 'T:gauss_width': chaospy.Uniform(0.01, 0.4),\n 'T:gauss_centre': chaospy.Uniform(0.0, 2 * np.pi)}\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n campaign.draw_samples()\n run_dirs = campaign.populate_runs_dir()\n print(f'Created run directories: {run_dirs}')\n if args.batch:\n cluster = SLURMCluster(job_extra=['--job-name=VVUQ',\n '--account=PHYS-YPIRSE-2019'], cores=1, memory='1 GB',\n processes=1, walltime='00:10:00', interface='ib0')\n cluster.scale(16)\n print(f'Job script:\\n{cluster.job_script()}')\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n print(client)\n time_start = time.time()\n campaign.apply_for_each_run_dir(uq.actions.ExecuteLocal(os.path.abspath\n ('build/models/conduction/conduction -q -q -q -d .')), client)\n client.close()\n time_end = time.time()\n print(f'Finished, took {time_end - time_start}')\n campaign.collate()\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler,\n qoi_cols=['T']))\n results = campaign.get_last_analysis()\n state_filename = os.path.join(campaign.campaign_dir, 'campaign_state.json')\n campaign.save_state(state_filename)\n plt.figure()\n results.plot_moments('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/moments.png')\n plt.figure()\n results.plot_sobols_first('T', xlabel='$\\\\rho$', filename=\n f'{campaign.campaign_dir}/sobols_first.png')\n",
"step-4": "#!/usr/bin/env python3\n\nimport argparse\nimport boutvecma\nimport easyvvuq as uq\nimport chaospy\nimport os\nimport numpy as np\nimport time\nfrom dask.distributed import Client\nfrom dask_jobqueue import SLURMCluster\nimport matplotlib.pyplot as plt\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description=\"EasyVVUQ applied to BOUT++\")\n parser.add_argument(\n \"--batch\",\n \"-b\",\n help=\"Run on a batch (SLURM) system\",\n action=\"store_true\",\n default=False,\n )\n args = parser.parse_args()\n\n campaign = uq.CampaignDask(name=\"Conduction.\")\n print(f\"Running in {campaign.campaign_dir}\")\n encoder = boutvecma.BOUTEncoder(template_input=\"models/conduction/data/BOUT.inp\")\n decoder = boutvecma.BOUTDecoder(variables=[\"T\"])\n params = {\n \"conduction:chi\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:scale\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 1.0},\n \"T:gauss_width\": {\"type\": \"float\", \"min\": 0.0, \"max\": 1e3, \"default\": 0.2},\n \"T:gauss_centre\": {\n \"type\": \"float\",\n \"min\": 0.0,\n \"max\": 2 * np.pi,\n \"default\": np.pi,\n },\n }\n\n campaign.add_app(\"1D_conduction\", params=params, encoder=encoder, decoder=decoder)\n\n vary = {\n \"conduction:chi\": chaospy.Uniform(0.2, 4.0),\n \"T:scale\": chaospy.Uniform(0.5, 1.5),\n \"T:gauss_width\": chaospy.Uniform(0.01, 0.4),\n \"T:gauss_centre\": chaospy.Uniform(0.0, 2 * np.pi),\n }\n\n sampler = uq.sampling.PCESampler(vary=vary, polynomial_order=3)\n campaign.set_sampler(sampler)\n\n campaign.draw_samples()\n\n run_dirs = campaign.populate_runs_dir()\n\n print(f\"Created run directories: {run_dirs}\")\n\n if args.batch:\n # Example of use on Viking\n cluster = SLURMCluster(\n job_extra=[\n \"--job-name=VVUQ\",\n \"--account=PHYS-YPIRSE-2019\",\n ],\n cores=1,\n memory=\"1 GB\",\n processes=1,\n walltime=\"00:10:00\",\n interface=\"ib0\",\n )\n cluster.scale(16)\n print(f\"Job script:\\n{cluster.job_script()}\")\n client = Client(cluster)\n else:\n client = Client(processes=True, threads_per_worker=1)\n\n print(client)\n\n time_start = time.time()\n campaign.apply_for_each_run_dir(\n uq.actions.ExecuteLocal(\n os.path.abspath(\"build/models/conduction/conduction -q -q -q -d .\")\n ),\n client,\n )\n client.close()\n\n time_end = time.time()\n\n print(f\"Finished, took {time_end - time_start}\")\n\n campaign.collate()\n\n campaign.apply_analysis(uq.analysis.PCEAnalysis(sampler=sampler, qoi_cols=[\"T\"]))\n\n results = campaign.get_last_analysis()\n\n state_filename = os.path.join(campaign.campaign_dir, \"campaign_state.json\")\n campaign.save_state(state_filename)\n\n plt.figure()\n results.plot_moments(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/moments.png\"\n )\n plt.figure()\n results.plot_sobols_first(\n \"T\", xlabel=r\"$\\rho$\", filename=f\"{campaign.campaign_dir}/sobols_first.png\"\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Cores no terminal
"""
a = 3
b = 5
print('Os valores são \033[32m{}\033[m e \033[31m{}\033[m !!!'.format(a, b))
# Dicionário de cores:
nome = 'Kátia'
cores = {'limpa':'\033]m',
'azul':'\033[34m',
'amarelo':'\033[33m',
'pretoebranco':'\033[7;30m'}
print('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))
# dá pra colocar as cores dentro das chaves tb.
|
normal
|
{
"blob_id": "7bbbd30ba1578c1165ccf5c2fff22609c16dfd64",
"index": 393,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\n<mask token>\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n",
"step-3": "<mask token>\na = 3\nb = 5\nprint('Os valores são \\x1b[32m{}\\x1b[m e \\x1b[31m{}\\x1b[m !!!'.format(a, b))\nnome = 'Kátia'\ncores = {'limpa': '\\x1b]m', 'azul': '\\x1b[34m', 'amarelo': '\\x1b[33m',\n 'pretoebranco': '\\x1b[7;30m'}\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores[\n 'amarelo']))\n",
"step-4": "\"\"\"\r\nCores no terminal\r\n\"\"\"\r\n\r\na = 3\r\nb = 5\r\nprint('Os valores são \\033[32m{}\\033[m e \\033[31m{}\\033[m !!!'.format(a, b))\r\n\r\n# Dicionário de cores:\r\nnome = 'Kátia'\r\ncores = {'limpa':'\\033]m',\r\n 'azul':'\\033[34m',\r\n 'amarelo':'\\033[33m',\r\n 'pretoebranco':'\\033[7;30m'}\r\n\r\nprint('Prazer em te conhecer, {}{}{}!!!'.format(cores['azul'], nome, cores['amarelo']))\r\n# dá pra colocar as cores dentro das chaves tb.\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def individual(list_df: list, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
for df in list_df:
df.plot()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
df_labels = []
for df in list_df:
df_labels.append(df.columns[df_col_index])
df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in
zip(list_df, df_labels)})
df_all.plot()
def individual(list_df: list, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
for df in list_df:
df.plot()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__author__ = 'alexglenday'
def group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
df_labels = []
for df in list_df:
df_labels.append(df.columns[df_col_index])
df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in
zip(list_df, df_labels)})
df_all.plot()
def individual(list_df: list, seaborn_context: str='poster'):
sns.set_context(seaborn_context)
for df in list_df:
df.plot()
<|reserved_special_token_1|>
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
__author__ = 'alexglenday'
def group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):
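    # Pull column df_col_index from every DataFrame, assemble the series into
    # one frame keyed by each column's label, and plot them on shared axes.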
sns.set_context(seaborn_context)
df_labels = []
for df in list_df:
df_labels.append(df.columns[df_col_index])
df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in
zip(list_df, df_labels)})
df_all.plot()
def individual(list_df: list, seaborn_context: str='poster'):
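    # Plot each DataFrame separately, one set of axes per frame.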
sns.set_context(seaborn_context)
for df in list_df:
df.plot()
|
flexible
|
{
"blob_id": "d2632461fcdc39509610b96d43dd1ec42dae362f",
"index": 5229,
"step-1": "<mask token>\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-2": "<mask token>\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-3": "<mask token>\n__author__ = 'alexglenday'\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-4": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n__author__ = 'alexglenday'\n\n\ndef group(list_df: list, df_col_index: int=0, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n df_labels = []\n for df in list_df:\n df_labels.append(df.columns[df_col_index])\n df_all = pd.DataFrame({label: df.iloc[:, df_col_index] for df, label in\n zip(list_df, df_labels)})\n df_all.plot()\n\n\ndef individual(list_df: list, seaborn_context: str='poster'):\n sns.set_context(seaborn_context)\n for df in list_df:\n df.plot()\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
from flask import Blueprint
views = Blueprint('views', __name__)
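# Imported last so the blueprint above already exists when routes.py
# registers its views, avoiding a circular import.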
from . import routes
|
normal
|
{
"blob_id": "139ccdaf7acb2a2d74649f0c32217d1fe71a954a",
"index": 4800,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nviews = Blueprint('views', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\nviews = Blueprint('views', __name__)\nfrom . import routes\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
__author__ = 'wxy'
class ListProcess(object):
def __init__(self, rsp, nickname):
self.rsp = rsp
self.nickname = nickname
def get_friend_uin(self):
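        # Scan rsp['result']['info'] for the entry whose nick matches
        # self.nickname and return its uin; False if the lookup fails.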
try:
for list in self.rsp['result']['info']:
if list['nick'] == self.nickname:
tar_uin = list['uin']
return tar_uin
except:
return False
def get_group_uin(self):
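        # Scan rsp['result']['gnamelist'] for the group whose name matches
        # self.nickname and return its gid; False if the lookup fails.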
try:
for list in self.rsp['result']['gnamelist']:
if list['name'] == self.nickname:
print '++++++++++++++++++++++++++++++++++'
print list
tar_uin = list['gid']
return tar_uin
except:
return False
|
normal
|
{
"blob_id": "1154fd3883dc8856e24127d56ce6a983308dc1aa",
"index": 3683,
"step-1": "# -*- coding: utf-8 -*-\n__author__ = 'wxy'\n\nclass ListProcess(object):\n def __init__(self, rsp, nickname):\n self.rsp = rsp\n self.nickname = nickname\n\n def get_friend_uin(self):\n try:\n for list in self.rsp['result']['info']:\n if list['nick'] == self.nickname:\n tar_uin = list['uin']\n return tar_uin\n except:\n return False\n\n def get_group_uin(self):\n try:\n for list in self.rsp['result']['gnamelist']:\n if list['name'] == self.nickname:\n print '++++++++++++++++++++++++++++++++++'\n print list\n tar_uin = list['gid']\n return tar_uin\n except:\n return False\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def retry(retry_count=2, delay=5, action_description='not specified',
allowed_exceptions=()):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for _ in range(retry_count):
try:
return func(*args, **kwargs)
except allowed_exceptions as e:
print('Error executing {}: {}'.format(func.__name__, e))
print('Waiting for {} sec before executing {} again'.
format(delay, func.__name__))
sleep(delay)
print('Retrying to execute ' + func.__name__ +
' (action: ' + action_description + ')')
return wrapper
return decorator
<|reserved_special_token_1|>
from functools import wraps
from time import sleep
def retry(retry_count=2, delay=5, action_description='not specified',
allowed_exceptions=()):
def decorator(func):
@wraps(func)
def wrapper(*args, **kwargs):
for _ in range(retry_count):
try:
return func(*args, **kwargs)
except allowed_exceptions as e:
print('Error executing {}: {}'.format(func.__name__, e))
print('Waiting for {} sec before executing {} again'.
format(delay, func.__name__))
sleep(delay)
print('Retrying to execute ' + func.__name__ +
' (action: ' + action_description + ')')
return wrapper
return decorator
<|reserved_special_token_1|>
from functools import wraps
from time import sleep
def retry(retry_count=2, delay=5, action_description='not specified', allowed_exceptions=()):
def decorator(func):
@wraps(func) # to preserve metadata of the function to be decorated
def wrapper(*args, **kwargs):
for _ in range(retry_count):
try:
return func(*args, **kwargs)
except allowed_exceptions as e:
print('Error executing {}: {}'.format(func.__name__, e))
print('Waiting for {} sec before executing {} again'.format(delay, func.__name__))
sleep(delay)
print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')')
return wrapper
return decorator
|
flexible
|
{
"blob_id": "79e4592d5ea84cc7c97d68a9390eb5d387045cf0",
"index": 4344,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n",
"step-3": "from functools import wraps\nfrom time import sleep\n\n\ndef retry(retry_count=2, delay=5, action_description='not specified',\n allowed_exceptions=()):\n\n def decorator(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n for _ in range(retry_count):\n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.\n format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ +\n ' (action: ' + action_description + ')')\n return wrapper\n return decorator\n",
"step-4": "from functools import wraps\nfrom time import sleep\n\ndef retry(retry_count = 2, delay = 5, action_description = 'not specified', allowed_exceptions=()):\n def decorator(func):\n @wraps(func) # to preserve metadata of the function to be decorated\n def wrapper(*args, **kwargs):\n for _ in range(retry_count): \n try:\n return func(*args, **kwargs)\n except allowed_exceptions as e:\n print('Error executing {}: {}'.format(func.__name__, e))\n print('Waiting for {} sec before executing {} again'.format(delay, func.__name__))\n sleep(delay)\n print('Retrying to execute ' + func.__name__ + ' (action: ' + action_description + ')')\n return wrapper\n return decorator",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from celery import Celery
app = Celery('task', include=['task.tasks'])
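# include= makes the worker import task.tasks at startup so its @app.task
# functions are registered with this app.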
app.config_from_object('task.config')
if __name__ == '__main__':
app.start()
|
normal
|
{
"blob_id": "68d9f77f91a13c73373c323ef0edbe18af9990a3",
"index": 4321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp.config_from_object('task.config')\nif __name__ == '__main__':\n app.start()\n",
"step-3": "<mask token>\napp = Celery('task', include=['task.tasks'])\napp.config_from_object('task.config')\nif __name__ == '__main__':\n app.start()\n",
"step-4": "from celery import Celery\napp = Celery('task', include=['task.tasks'])\napp.config_from_object('task.config')\nif __name__ == '__main__':\n app.start()\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom celery import Celery\n\napp = Celery('task', include=['task.tasks'])\n\napp.config_from_object('task.config')\n\nif __name__ == '__main__':\n app.start()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def main():
sorts = ['selection-sort', 'insertion-sort', 'shell-sort']
for sort in sorts:
exe_path = './build/{}'.format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError('The executable {} does not exist.'.format(exe_path))
accumulated_time = 0
for i in range(N):
b_output = subprocess.check_output(' '.join([exe_path, DATA]),
shell=True)
str_output = str(b_output)
accumulated_time += int(re.findall('\\d+', str_output)[0])
average_time = accumulated_time / N
if 'selection-sort' == sort:
print('{:>14} took {:>8} ns on average.'.format(sort, int(
average_time)))
sel_sort_time = average_time
else:
print(
'{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'
.format(sort, int(average_time), sel_sort_time / average_time))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert sys.version_info[0] >= 3, 'This script requires Python 3.x'
assert os.getcwd().split('/')[-1
] == 'algorithms-sedgewick-wayne', "This script must be run from the project's root directory."
<|reserved_special_token_0|>
def main():
sorts = ['selection-sort', 'insertion-sort', 'shell-sort']
for sort in sorts:
exe_path = './build/{}'.format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError('The executable {} does not exist.'.format(exe_path))
accumulated_time = 0
for i in range(N):
b_output = subprocess.check_output(' '.join([exe_path, DATA]),
shell=True)
str_output = str(b_output)
accumulated_time += int(re.findall('\\d+', str_output)[0])
average_time = accumulated_time / N
if 'selection-sort' == sort:
print('{:>14} took {:>8} ns on average.'.format(sort, int(
average_time)))
sel_sort_time = average_time
else:
print(
'{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'
.format(sort, int(average_time), sel_sort_time / average_time))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
assert sys.version_info[0] >= 3, 'This script requires Python 3.x'
assert os.getcwd().split('/')[-1
] == 'algorithms-sedgewick-wayne', "This script must be run from the project's root directory."
N = 25
DATA = './algs4-data/medTale.txt'
def main():
sorts = ['selection-sort', 'insertion-sort', 'shell-sort']
for sort in sorts:
exe_path = './build/{}'.format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError('The executable {} does not exist.'.format(exe_path))
accumulated_time = 0
for i in range(N):
b_output = subprocess.check_output(' '.join([exe_path, DATA]),
shell=True)
str_output = str(b_output)
accumulated_time += int(re.findall('\\d+', str_output)[0])
average_time = accumulated_time / N
if 'selection-sort' == sort:
print('{:>14} took {:>8} ns on average.'.format(sort, int(
average_time)))
sel_sort_time = average_time
else:
print(
'{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'
.format(sort, int(average_time), sel_sort_time / average_time))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import sys
import re
import subprocess
assert sys.version_info[0] >= 3, 'This script requires Python 3.x'
assert os.getcwd().split('/')[-1
] == 'algorithms-sedgewick-wayne', "This script must be run from the project's root directory."
N = 25
DATA = './algs4-data/medTale.txt'
def main():
sorts = ['selection-sort', 'insertion-sort', 'shell-sort']
for sort in sorts:
exe_path = './build/{}'.format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError('The executable {} does not exist.'.format(exe_path))
accumulated_time = 0
for i in range(N):
b_output = subprocess.check_output(' '.join([exe_path, DATA]),
shell=True)
str_output = str(b_output)
accumulated_time += int(re.findall('\\d+', str_output)[0])
average_time = accumulated_time / N
if 'selection-sort' == sort:
print('{:>14} took {:>8} ns on average.'.format(sort, int(
average_time)))
sel_sort_time = average_time
else:
print(
'{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'
.format(sort, int(average_time), sel_sort_time / average_time))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
#!/usr/bin/env python3
#
# compare-sorts.py
# Copyright (c) 2017 Dylan Brown. All rights reserved.
#
# Use Python 3. Run from within the scripts/ directory.
import os
import sys
import re
import subprocess
# Ensure we don't silently fail by running Python 2.
assert sys.version_info[0] >= 3, "This script requires Python 3.x"
assert os.getcwd().split("/")[-1] == "algorithms-sedgewick-wayne", \
"This script must be run from the project's root directory."
# Number of iterations to average over.
N = 25
# Data file to sort.
# DATA = "./algs4-data/words3.txt"
DATA = "./algs4-data/medTale.txt"
def main():
sorts = ["selection-sort",
"insertion-sort",
"shell-sort"]
for sort in sorts:
exe_path = "./build/{}".format(sort.rstrip())
if not os.path.isfile(exe_path):
raise OSError("The executable {} does not exist.".format(exe_path))
accumulated_time = 0
for i in range(N):
# Note shell=True has security implications. Don't accept external inputs.
b_output = subprocess.check_output(" ".join([exe_path, DATA]), shell=True)
str_output = str(b_output)
            # Use regex to extract the number following "(ns) =" in the output.
            accumulated_time += int(re.findall(r"\d+", str_output)[0])  # Elapsed time in nanoseconds.
average_time = accumulated_time / N
if "selection-sort" == sort:
print("{:>14} took {:>8} ns on average.".format(sort, int(average_time)))
sel_sort_time = average_time
else:
print("{:>14} took {:>8} ns on average, "
"a {:4.1f}x speedup over selection sort.".format(sort,
int(average_time),
sel_sort_time / average_time))
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "501d50fa933f55c178b4b2eba6cfc5b85592beaa",
"index": 8473,
"step-1": "<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\n<mask token>\n",
"step-2": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\n<mask token>\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nimport re\nimport subprocess\nassert sys.version_info[0] >= 3, 'This script requires Python 3.x'\nassert os.getcwd().split('/')[-1\n ] == 'algorithms-sedgewick-wayne', \"This script must be run from the project's root directory.\"\nN = 25\nDATA = './algs4-data/medTale.txt'\n\n\ndef main():\n sorts = ['selection-sort', 'insertion-sort', 'shell-sort']\n for sort in sorts:\n exe_path = './build/{}'.format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError('The executable {} does not exist.'.format(exe_path))\n accumulated_time = 0\n for i in range(N):\n b_output = subprocess.check_output(' '.join([exe_path, DATA]),\n shell=True)\n str_output = str(b_output)\n accumulated_time += int(re.findall('\\\\d+', str_output)[0])\n average_time = accumulated_time / N\n if 'selection-sort' == sort:\n print('{:>14} took {:>8} ns on average.'.format(sort, int(\n average_time)))\n sel_sort_time = average_time\n else:\n print(\n '{:>14} took {:>8} ns on average, a {:4.1f}x speedup over selection sort.'\n .format(sort, int(average_time), sel_sort_time / average_time))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python3\n#\n# compare-sorts.py\n# Copyright (c) 2017 Dylan Brown. All rights reserved.\n#\n\n# Use Python 3. Run from within the scripts/ directory.\n\nimport os\nimport sys\nimport re\nimport subprocess\n\n# Ensure we don't silently fail by running Python 2.\nassert sys.version_info[0] >= 3, \"This script requires Python 3.x\"\nassert os.getcwd().split(\"/\")[-1] == \"algorithms-sedgewick-wayne\", \\\n \"This script must be run from the project's root directory.\"\n\n# Number of iterations to average over.\nN = 25\n\n# Data file to sort.\n# DATA = \"./algs4-data/words3.txt\"\nDATA = \"./algs4-data/medTale.txt\"\n\ndef main():\n sorts = [\"selection-sort\",\n \"insertion-sort\",\n \"shell-sort\"]\n\n for sort in sorts:\n exe_path = \"./build/{}\".format(sort.rstrip())\n if not os.path.isfile(exe_path):\n raise OSError(\"The executable {} does not exist.\".format(exe_path))\n\n accumulated_time = 0\n for i in range(N):\n # Note shell=True has security implications. Don't accept external inputs.\n b_output = subprocess.check_output(\" \".join([exe_path, DATA]), shell=True)\n str_output = str(b_output)\n # Use regex to extract the number follwing \"(ns) =\" in the output.\n accumulated_time += int(re.findall(\"\\d+\", str_output)[0]) # Elapsed time in nanoseconds.\n average_time = accumulated_time / N\n\n if \"selection-sort\" == sort:\n print(\"{:>14} took {:>8} ns on average.\".format(sort, int(average_time)))\n sel_sort_time = average_time\n else:\n print(\"{:>14} took {:>8} ns on average, \"\n \"a {:4.1f}x speedup over selection sort.\".format(sort,\n int(average_time),\n sel_sort_time / average_time))\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
class GeneralInformation(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['name']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneralInformation(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['name']
def __str__(self):
return '{} {} {}'.format(self.name, self.address, self.city)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class GeneralInformation(models.Model):
name = models.CharField(max_length=100)
address = models.TextField()
city = models.CharField(max_length=20)
class Meta:
ordering = ['name']
def __str__(self):
return '{} {} {}'.format(self.name, self.address, self.city)
<|reserved_special_token_1|>
from django.db import models
class GeneralInformation(models.Model):
name = models.CharField(max_length=100)
address = models.TextField()
city = models.CharField(max_length=20)
class Meta:
ordering = ['name']
def __str__(self):
return '{} {} {}'.format(self.name, self.address, self.city)
<|reserved_special_token_1|>
from django.db import models
# Create your models here.
class GeneralInformation(models.Model):
name = models.CharField(max_length=100)
address = models.TextField()
city = models.CharField(max_length=20)
class Meta:
ordering = ['name']
def __str__(self):
return "{} {} {}".format(self.name, self.address, self.city)
|
flexible
|
{
"blob_id": "d0f83e3b7eb5e1bc81a56e46043f394757437af8",
"index": 5504,
"step-1": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-3": "<mask token>\n\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-4": "from django.db import models\n\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return '{} {} {}'.format(self.name, self.address, self.city)\n",
"step-5": "from django.db import models\n\n\n# Create your models here.\n\nclass GeneralInformation(models.Model):\n name = models.CharField(max_length=100)\n address = models.TextField()\n city = models.CharField(max_length=20)\n\n class Meta:\n ordering = ['name']\n\n def __str__(self):\n return \"{} {} {}\".format(self.name, self.address, self.city)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
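A minimal usage sketch for the Django model in the record above. The app label
`myapp` and all field values are illustrative assumptions, not part of the record:

from myapp.models import GeneralInformation

# Create and persist one row, then list rows; Meta.ordering sorts by name.
info = GeneralInformation(name='Acme', address='1 Main St', city='Springfield')
info.save()
for row in GeneralInformation.objects.all():
    print(row)  # __str__ renders as "name address city"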
<|reserved_special_token_0|>
class CRICAgent(DataSourceAgent):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CRICAgent(DataSourceAgent):
def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
super().__init__(dbinfo, name, namespace)
self.uri = self.ensure_field(cfg, 'uri')
self.local_asn = cfg.get('local_asn', None)
self.refresh_interval = cfg.get('refresh_interval', None)
self.netroute_map = dict()
logging.info('Loading databases')
self.db = [self.request_db(t) for t in ['endpoint']]
<|reserved_special_token_0|>
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class CRICAgent(DataSourceAgent):
def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
super().__init__(dbinfo, name, namespace)
self.uri = self.ensure_field(cfg, 'uri')
self.local_asn = cfg.get('local_asn', None)
self.refresh_interval = cfg.get('refresh_interval', None)
self.netroute_map = dict()
logging.info('Loading databases')
self.db = [self.request_db(t) for t in ['endpoint']]
def update(self):
eb_trans = self.db[0].new_transaction()
cric_dict = dict()
if self.uri.startswith('http'):
data = requests.get(self.uri, verify=False)
cric_dict = json.loads(data.content)
else:
with open(self.uri, 'r') as f_cric:
cric_dict = json.load(f_cric)
for _, rcsite_obj in cric_dict.items():
netroutes = rcsite_obj.get('netroutes', dict())
for _, netroute in netroutes.items():
for _, ipprefixes in netroute['networks'].items():
for ipprefix in ipprefixes:
asn = netroute.get('asn')
if asn == self.local_asn:
eb_trans.add_property(ipprefix, {'is_local': True})
eb_trans.commit()
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
<|reserved_special_token_1|>
import requests
import json
import logging
import time
from alto.server.components.datasource import DBInfo, DataSourceAgent
class CRICAgent(DataSourceAgent):
def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
super().__init__(dbinfo, name, namespace)
self.uri = self.ensure_field(cfg, 'uri')
self.local_asn = cfg.get('local_asn', None)
self.refresh_interval = cfg.get('refresh_interval', None)
self.netroute_map = dict()
logging.info('Loading databases')
self.db = [self.request_db(t) for t in ['endpoint']]
def update(self):
eb_trans = self.db[0].new_transaction()
cric_dict = dict()
if self.uri.startswith('http'):
data = requests.get(self.uri, verify=False)
cric_dict = json.loads(data.content)
else:
with open(self.uri, 'r') as f_cric:
cric_dict = json.load(f_cric)
for _, rcsite_obj in cric_dict.items():
netroutes = rcsite_obj.get('netroutes', dict())
for _, netroute in netroutes.items():
for _, ipprefixes in netroute['networks'].items():
for ipprefix in ipprefixes:
asn = netroute.get('asn')
if asn == self.local_asn:
eb_trans.add_property(ipprefix, {'is_local': True})
eb_trans.commit()
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
<|reserved_special_token_1|>
import requests
import json
import logging
import time
from alto.server.components.datasource import DBInfo, DataSourceAgent
class CRICAgent(DataSourceAgent):
def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
super().__init__(dbinfo, name, namespace)
self.uri = self.ensure_field(cfg, 'uri')
self.local_asn = cfg.get('local_asn', None)
self.refresh_interval = cfg.get('refresh_interval', None)
self.netroute_map = dict()
logging.info("Loading databases")
self.db = [ self.request_db(t) for t in ['endpoint']]
def update(self):
eb_trans = self.db[0].new_transaction()
cric_dict = dict()
if self.uri.startswith('http'):
data = requests.get(self.uri, verify=False)
cric_dict = json.loads(data.content)
else:
with open(self.uri, 'r') as f_cric:
cric_dict = json.load(f_cric)
for _, rcsite_obj in cric_dict.items():
netroutes = rcsite_obj.get('netroutes', dict())
for _, netroute in netroutes.items():
for _, ipprefixes in netroute['networks'].items():
for ipprefix in ipprefixes:
asn = netroute.get('asn')
if asn == self.local_asn:
eb_trans.add_property(ipprefix, {'is_local': True})
eb_trans.commit()
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
|
flexible
|
{
"blob_id": "55c00ce4c1657dc5ce78e5eeccd8e9625c0590dc",
"index": 5345,
"step-1": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n <mask token>\n <mask token>\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-2": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n <mask token>\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-3": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-4": "import requests\nimport json\nimport logging\nimport time\nfrom alto.server.components.datasource import DBInfo, DataSourceAgent\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-5": "import requests\nimport json\nimport logging\nimport time\n\nfrom alto.server.components.datasource import DBInfo, DataSourceAgent\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n\n logging.info(\"Loading databases\")\n self.db = [ self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
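A sketch of how the agent in the record above might be driven. Only CRICAgent's
own signature is taken from the record; the DBInfo arguments, URI, and ASN are
placeholders, and the real DBInfo constructor is not shown here:

from alto.server.components.datasource import DBInfo

dbinfo = DBInfo(...)  # constructor args not shown in the record; placeholder
agent = CRICAgent(dbinfo, 'cric', namespace='default',
                  uri='https://cric.example.org/dump.json',  # or a local file path
                  local_asn=64512, refresh_interval=300)
agent.run()  # blocks forever, calling update() every refresh_interval seconds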
from entities.GpsFix import GpsFix
class Visit(object):
"""
A Visit, which represents an arrival-departure to a stay point
Attributes:
id_visit: the id of the visit itself
id_stay_point: the id of the stay point
pivot_arrival_fix: the GpsFix that corresponds to real world arrival
pivot_departure_fix: the GpsFix that corresponds to real world departure
detection_arrival_fix: the GpsFix that triggered the arrival by the platform
detection_departure_fix: the GpsFix that triggered the departure by the platform
stay_time: stay time of the visit in seconds
"""
def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,
detection_arrival_fix: GpsFix,
detection_departure_fix: GpsFix):
"""
Builds a Visit object
:param id_visit: the id of the visit
:param id_stay_point: the id of the stay point
:param pivot_arrival_fix: the GpsFix that corresponds to real world arrival
:param pivot_departure_fix: the GpsFix that corresponds to real world departure
:param detection_arrival_fix: the GpsFix that triggered the arrival by the platform
:param detection_departure_fix: the GpsFix that triggered the departure by the platform
"""
self.id_visit = id_visit
self.id_stay_point = id_stay_point
self.pivot_arrival_fix = pivot_arrival_fix
self.pivot_departure_fix = pivot_departure_fix
self.detection_arrival_fix = detection_arrival_fix
self.detection_departure_fix = detection_departure_fix
self.stay_time = None
self.update_stay_time()
def update_stay_time(self):
"""
Updates the stay time of visit
:return: None
"""
# It would not be better to simply self.stay_time = self.get_length() ??
self.stay_time = self.get_length()
def get_length(self) -> int:
"""
Gets the length of visit in seconds
:return: The length of visit in seconds
"""
return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()
def __str__(self):
date_format = '%Y-%m-%d %H:%M:%S'
return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,
self.pivot_arrival_fix.timestamp.strftime(date_format),
self.pivot_departure_fix.timestamp.strftime(date_format), self.get_length())
|
normal
|
{
"blob_id": "703ed320e7c06856a0798d9c0de9aafe24458767",
"index": 7937,
"step-1": "<mask token>\n\n\nclass Visit(object):\n <mask token>\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n <mask token>\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-2": "<mask token>\n\n\nclass Visit(object):\n <mask token>\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-3": "<mask token>\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-4": "from entities.GpsFix import GpsFix\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix,\n pivot_departure_fix: GpsFix, detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n self.stay_time = self.get_length()\n\n def get_length(self) ->int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix\n .timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format), self.\n pivot_departure_fix.timestamp.strftime(date_format), self.\n get_length())\n",
"step-5": "from entities.GpsFix import GpsFix\n\n\nclass Visit(object):\n \"\"\"\n A Visit, which represents an arrival-departure to a stay point\n\n Attributes:\n id_visit: the id of the visit itself\n id_stay_point: the id of the stay point\n pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n pivot_departure_fix: the GpsFix that corresponds to real world departure\n detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n detection_departure_fix: the GpsFix that triggered the departure by the platform\n stay_time: stay time of the visit in seconds\n \"\"\"\n\n def __init__(self, id_visit, id_stay_point, pivot_arrival_fix: GpsFix, pivot_departure_fix: GpsFix,\n detection_arrival_fix: GpsFix,\n detection_departure_fix: GpsFix):\n \"\"\"\n Builds a Visit object\n :param id_visit: the id of the visit\n :param id_stay_point: the id of the stay point\n :param pivot_arrival_fix: the GpsFix that corresponds to real world arrival\n :param pivot_departure_fix: the GpsFix that corresponds to real world departure\n :param detection_arrival_fix: the GpsFix that triggered the arrival by the platform\n :param detection_departure_fix: the GpsFix that triggered the departure by the platform\n \"\"\"\n self.id_visit = id_visit\n self.id_stay_point = id_stay_point\n self.pivot_arrival_fix = pivot_arrival_fix\n self.pivot_departure_fix = pivot_departure_fix\n self.detection_arrival_fix = detection_arrival_fix\n self.detection_departure_fix = detection_departure_fix\n self.stay_time = None\n self.update_stay_time()\n\n def update_stay_time(self):\n \"\"\"\n Updates the stay time of visit\n :return: None\n \"\"\"\n # It would not be better to simply self.stay_time = self.get_length() ??\n self.stay_time = self.get_length()\n\n def get_length(self) -> int:\n \"\"\"\n Gets the length of visit in seconds\n :return: The length of visit in seconds\n \"\"\"\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()\n\n def __str__(self):\n date_format = '%Y-%m-%d %H:%M:%S'\n return '{},{},{},{},{}'.format(self.id_visit, self.id_stay_point,\n self.pivot_arrival_fix.timestamp.strftime(date_format),\n self.pivot_departure_fix.timestamp.strftime(date_format), self.get_length())\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
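A small worked example of the Visit class above. Visit only reads the
.timestamp attribute of the pivot fixes, and Python type hints are not enforced
at runtime, so a stand-in object is enough to exercise the arithmetic:

from datetime import datetime

class FakeFix:  # stand-in with the one attribute Visit actually reads
    def __init__(self, ts):
        self.timestamp = ts

arrival = FakeFix(datetime(2024, 1, 1, 9, 0, 0))
departure = FakeFix(datetime(2024, 1, 1, 9, 30, 0))
v = Visit(1, 7, arrival, departure, arrival, departure)
print(v.stay_time)  # 1800.0 seconds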
import flask
from flask.ext.classy import FlaskView, route, request
from annotator_supreme.controllers.user_controller import UserController
from annotator_supreme.views import view_tools
from annotator_supreme.views import error_views
from flask import render_template, flash, redirect, url_for
from annotator_supreme import app
from flask.ext.login import login_user, logout_user
import json
class LoginViewWebApp(FlaskView):
route_base = '/'
def __init__(self):
self.user_controller = UserController()
@route('/register' , methods=['GET','POST'])
def register_user(self):
if request.method == 'GET':
return render_template('register.html')
elif request.method == 'POST':
app.logger.info("Got post")
app.logger.info(request.form)
username, password, email = request.form['username'] , request.form['password'], request.form['email']
ok, error = self.user_controller.create_user(username, password, email)
if ok:
return "", 200
else:
return "User already registered", 432
@route('/login',methods=['GET','POST'])
def login(self):
if request.method == 'GET':
return render_template('login.html')
elif request.method == 'POST':
username = request.form['username']
password = request.form['password']
user = self.user_controller.get_user_w_password(username, password)
if user is None:
return "Invalid credentials", 432
else:
login_user(user)
return "", 200
@route('/logout', methods=['GET'])
def logout(self):
logout_user()
return "", 200
|
normal
|
{
"blob_id": "a2e77298059104b403555af95430d7995f8a697b",
"index": 1379,
"step-1": "<mask token>\n\n\nclass LoginViewWebApp(FlaskView):\n <mask token>\n\n def __init__(self):\n self.user_controller = UserController()\n\n @route('/register', methods=['GET', 'POST'])\n def register_user(self):\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method == 'POST':\n app.logger.info('Got post')\n app.logger.info(request.form)\n username, password, email = request.form['username'], request.form[\n 'password'], request.form['email']\n ok, error = self.user_controller.create_user(username, password,\n email)\n if ok:\n return '', 200\n else:\n return 'User already registered', 432\n <mask token>\n\n @route('/logout', methods=['GET'])\n def logout(self):\n logout_user()\n return '', 200\n",
"step-2": "<mask token>\n\n\nclass LoginViewWebApp(FlaskView):\n <mask token>\n\n def __init__(self):\n self.user_controller = UserController()\n\n @route('/register', methods=['GET', 'POST'])\n def register_user(self):\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method == 'POST':\n app.logger.info('Got post')\n app.logger.info(request.form)\n username, password, email = request.form['username'], request.form[\n 'password'], request.form['email']\n ok, error = self.user_controller.create_user(username, password,\n email)\n if ok:\n return '', 200\n else:\n return 'User already registered', 432\n\n @route('/login', methods=['GET', 'POST'])\n def login(self):\n if request.method == 'GET':\n return render_template('login.html')\n elif request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n user = self.user_controller.get_user_w_password(username, password)\n if user is None:\n return 'Invalid credentials', 432\n else:\n login_user(user)\n return '', 200\n\n @route('/logout', methods=['GET'])\n def logout(self):\n logout_user()\n return '', 200\n",
"step-3": "<mask token>\n\n\nclass LoginViewWebApp(FlaskView):\n route_base = '/'\n\n def __init__(self):\n self.user_controller = UserController()\n\n @route('/register', methods=['GET', 'POST'])\n def register_user(self):\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method == 'POST':\n app.logger.info('Got post')\n app.logger.info(request.form)\n username, password, email = request.form['username'], request.form[\n 'password'], request.form['email']\n ok, error = self.user_controller.create_user(username, password,\n email)\n if ok:\n return '', 200\n else:\n return 'User already registered', 432\n\n @route('/login', methods=['GET', 'POST'])\n def login(self):\n if request.method == 'GET':\n return render_template('login.html')\n elif request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n user = self.user_controller.get_user_w_password(username, password)\n if user is None:\n return 'Invalid credentials', 432\n else:\n login_user(user)\n return '', 200\n\n @route('/logout', methods=['GET'])\n def logout(self):\n logout_user()\n return '', 200\n",
"step-4": "import flask\nfrom flask.ext.classy import FlaskView, route, request\nfrom annotator_supreme.controllers.user_controller import UserController\nfrom annotator_supreme.views import view_tools\nfrom annotator_supreme.views import error_views\nfrom flask import render_template, flash, redirect, url_for\nfrom annotator_supreme import app\nfrom flask.ext.login import login_user, logout_user\nimport json\n\n\nclass LoginViewWebApp(FlaskView):\n route_base = '/'\n\n def __init__(self):\n self.user_controller = UserController()\n\n @route('/register', methods=['GET', 'POST'])\n def register_user(self):\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method == 'POST':\n app.logger.info('Got post')\n app.logger.info(request.form)\n username, password, email = request.form['username'], request.form[\n 'password'], request.form['email']\n ok, error = self.user_controller.create_user(username, password,\n email)\n if ok:\n return '', 200\n else:\n return 'User already registered', 432\n\n @route('/login', methods=['GET', 'POST'])\n def login(self):\n if request.method == 'GET':\n return render_template('login.html')\n elif request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n user = self.user_controller.get_user_w_password(username, password)\n if user is None:\n return 'Invalid credentials', 432\n else:\n login_user(user)\n return '', 200\n\n @route('/logout', methods=['GET'])\n def logout(self):\n logout_user()\n return '', 200\n",
"step-5": "import flask\nfrom flask.ext.classy import FlaskView, route, request\nfrom annotator_supreme.controllers.user_controller import UserController\nfrom annotator_supreme.views import view_tools\nfrom annotator_supreme.views import error_views\nfrom flask import render_template, flash, redirect, url_for\nfrom annotator_supreme import app\nfrom flask.ext.login import login_user, logout_user\nimport json\n\nclass LoginViewWebApp(FlaskView):\n route_base = '/'\n\n def __init__(self):\n self.user_controller = UserController()\n\n @route('/register' , methods=['GET','POST'])\n def register_user(self):\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method == 'POST':\n app.logger.info(\"Got post\")\n app.logger.info(request.form)\n\n username, password, email = request.form['username'] , request.form['password'], request.form['email']\n ok, error = self.user_controller.create_user(username, password, email)\n if ok:\n return \"\", 200\n else:\n return \"User already registered\", 432\n \n @route('/login',methods=['GET','POST'])\n def login(self):\n if request.method == 'GET':\n return render_template('login.html')\n elif request.method == 'POST': \n username = request.form['username']\n password = request.form['password']\n user = self.user_controller.get_user_w_password(username, password)\n if user is None:\n return \"Invalid credentials\", 432\n else:\n login_user(user)\n return \"\", 200\n\n @route('/logout', methods=['GET'])\n def logout(self):\n logout_user()\n return \"\", 200\n \n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
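Wiring the view in the record above into the app follows Flask-Classy's
register() convention; the curl line is an illustrative request, not part of
the record:

from annotator_supreme import app

LoginViewWebApp.register(app)  # exposes /register, /login and /logout
# e.g. curl -X POST -d 'username=alice&password=secret' http://localhost:5000/login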
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for data in password:
if data != pw:
pass
else:
print('พบข้อมูลรหัสผ่านนี้')
print('แล้วเจอกันใหม่')
<|reserved_special_token_1|>
password = ['123456', '1111']
pw = input('รหัสผ่านคือ>>>')
for data in password:
if data != pw:
pass
else:
print('พบข้อมูลรหัสผ่านนี้')
print('แล้วเจอกันใหม่')
<|reserved_special_token_1|>
password = ["123456", "1111"]
pw = input("รหัสผ่านคือ>>>")
for data in password:
if data != pw:
pass
else:
print("พบข้อมูลรหัสผ่านนี้")
print("แล้วเจอกันใหม่")
|
flexible
|
{
"blob_id": "6f05b1352e776e20d6a9e0eb457d8914cbfc2d22",
"index": 2779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor data in password:\n if data != pw:\n pass\n else:\n print('พบข้อมูลรหัสผ่านนี้')\nprint('แล้วเจอกันใหม่')\n",
"step-3": "password = ['123456', '1111']\npw = input('รหัสผ่านคือ>>>')\nfor data in password:\n if data != pw:\n pass\n else:\n print('พบข้อมูลรหัสผ่านนี้')\nprint('แล้วเจอกันใหม่')\n",
"step-4": "password = [\"123456\", \"1111\"]\r\npw = input(\"รหัสผ่านคือ>>>\")\r\nfor data in password:\r\n if data != pw:\r\n pass\r\n else:\r\n print(\"พบข้อมูลรหัสผ่านนี้\")\r\nprint(\"แล้วเจอกันใหม่\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
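The loop in the record above amounts to a membership test; an equivalent,
more idiomatic form (same output as long as the stored passwords are unique):

password = ['123456', '1111']
pw = input('รหัสผ่านคือ>>>')
if pw in password:
    print('พบข้อมูลรหัสผ่านนี้')
print('แล้วเจอกันใหม่')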
import pygame
import numpy as np
import random
from enum import Enum
from .config import *
class Actions(Enum):
FORWARD = 0
RIGHT = 1
LEFT = 2
BACK = 3
class MazeEnv():
''' TODO '''
def __init__(self, GW, GH, SW, SH):
global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT
GRID_WIDTH = GW
GRID_HEIGHT = GH
SCREEN_WIDTH = SW
SCREEN_HEIGHT = SH
BOX_WIDTH = SCREEN_WIDTH/GRID_WIDTH
BOX_HEIGHT = SCREEN_HEIGHT/GRID_HEIGHT
WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)
# Setup ML stuff
self.pos = np.array(self.getPos(SPAWN_STATE))
self.action_space = Actions
self.max_states = GRID_WIDTH * GRID_HEIGHT
self.max_actions = len(self.action_space)
self.Q = np.zeros([GRID_WIDTH*GRID_HEIGHT, len(self.action_space)])
self.tunnel_vision = False
# Other
self.WALLS = list(WALLS)
self.WIN_STATE = WIN_STATE
self.SPAWN_STATE = SPAWN_STATE
def step(self, action):
self.pos = self.moveDir(self.pos, self.action_space(action))
reward = -0.04
done = True
if self.getState() == self.WIN_STATE:
reward = 10
else:
done = False
return (self.getState(), reward, done, {})
def reset(self):
self.pos = np.array(self.getPos(self.SPAWN_STATE))
def render(self, screen, close=False):
self.screen = screen
self.screen.fill((0, 0, 0))
# Draw the grid
# font = pygame.font.Font(None, 22)
for x in range(GRID_WIDTH):
for y in range(GRID_HEIGHT):
all_points = []
all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])
width = 34
height = 10
text_offs = [[(BOX_WIDTH/2-width/2), height/2], [BOX_WIDTH-width, BOX_HEIGHT/2-height/2], [4, BOX_HEIGHT/2-height/2], [BOX_WIDTH/2-width/2, BOX_HEIGHT-height-4]]
for a in range(4):
s = pygame.Surface((BOX_WIDTH,BOX_HEIGHT), pygame.SRCALPHA)
s.fill((0, 0, 0, 0))
if self.getState((x, y)) == self.WIN_STATE:
col = (0, 255, 0, 255)
elif [x, y] in self.WALLS:
col = (128, 128, 128, 255)
elif len(self.Q) <= self.getState((x, y)) or len(self.Q[self.getState((x, y))]) <= a:
col = (0, 0, 0, 0)
elif self.Q[self.getState((x, y))][a] > 0:
col = (0, 255, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.max() * 195)
elif self.Q[self.getState((x, y))][a] < 0:
col = (255, 0, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.min() * 195)
else:
col = (0, 0, 0, 0)
if not self.tunnel_vision or self.getState((x, y)) == self.getState():
pygame.draw.polygon(s, col, [[all_points[a][b][0]-x*BOX_WIDTH, all_points[a][b][1]-y*BOX_HEIGHT] for b in range(3)])
self.screen.blit(s, (x*BOX_WIDTH, y*BOX_HEIGHT))
if self.getState((x, y)) != self.WIN_STATE and [x, y] not in self.WALLS:
pygame.draw.polygon(self.screen, (255, 255, 255), all_points[a], 2)
#if BOX_WIDTH > 80:
#trender = font.render("{0:.2f}".format(self.Q[self.getState((x, y)), a]), True, (255, 255, 255))
#self.screen.blit(trender, (x*BOX_WIDTH+text_offs[a][0], y*BOX_HEIGHT+text_offs[a][1]))
# Draw the player
pygame.draw.circle(self.screen, (0, 0, 255),
(int((self.pos[0]+0.5)*BOX_WIDTH),
int((self.pos[1]+0.5)*BOX_HEIGHT)),
max(10, int(BOX_WIDTH/10)))
pygame.display.update()
def moveDir(self, pos, action):
oldPos = list(pos)
if action == Actions.FORWARD:
pos[1] -= 1
elif action == Actions.RIGHT:
pos[0] += 1
elif action == Actions.LEFT:
pos[0] -= 1
elif action == Actions.BACK:
pos[1] += 1
if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1] >= GRID_HEIGHT \
or self.hitWall(pos):
pos = oldPos
return pos
def hitWall(self, pos):
for w in self.WALLS:
if w[0] == pos[0] and w[1] == pos[1]:
return True
return False
def getState(self, pos=False):
if not pos:
pos = self.pos
return int(pos[1]*GRID_WIDTH+pos[0])
def getPos(self, state):
return [state % GRID_WIDTH, state // GRID_WIDTH]
|
normal
|
{
"blob_id": "751d2a07b97d080988c54511ca13a97a969e06bd",
"index": 6405,
"step-1": "<mask token>\n\n\nclass MazeEnv:\n <mask token>\n\n def __init__(self, GW, GH, SW, SH):\n global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT\n GRID_WIDTH = GW\n GRID_HEIGHT = GH\n SCREEN_WIDTH = SW\n SCREEN_HEIGHT = SH\n BOX_WIDTH = SCREEN_WIDTH / GRID_WIDTH\n BOX_HEIGHT = SCREEN_HEIGHT / GRID_HEIGHT\n WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)\n self.pos = np.array(self.getPos(SPAWN_STATE))\n self.action_space = Actions\n self.max_states = GRID_WIDTH * GRID_HEIGHT\n self.max_actions = len(self.action_space)\n self.Q = np.zeros([GRID_WIDTH * GRID_HEIGHT, len(self.action_space)])\n self.tunnel_vision = False\n self.WALLS = list(WALLS)\n self.WIN_STATE = WIN_STATE\n self.SPAWN_STATE = SPAWN_STATE\n\n def step(self, action):\n self.pos = self.moveDir(self.pos, self.action_space(action))\n reward = -0.04\n done = True\n if self.getState() == self.WIN_STATE:\n reward = 10\n else:\n done = False\n return self.getState(), reward, done, {}\n\n def reset(self):\n self.pos = np.array(self.getPos(self.SPAWN_STATE))\n\n def render(self, screen, close=False):\n self.screen = screen\n self.screen.fill((0, 0, 0))\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n all_points = []\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT +\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / 2, y *\n BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH, y * BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / \n 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n width = 34\n height = 10\n text_offs = [[BOX_WIDTH / 2 - width / 2, height / 2], [\n BOX_WIDTH - width, BOX_HEIGHT / 2 - height / 2], [4, \n BOX_HEIGHT / 2 - height / 2], [BOX_WIDTH / 2 - width / \n 2, BOX_HEIGHT - height - 4]]\n for a in range(4):\n s = pygame.Surface((BOX_WIDTH, BOX_HEIGHT), pygame.SRCALPHA\n )\n s.fill((0, 0, 0, 0))\n if self.getState((x, y)) == self.WIN_STATE:\n col = 0, 255, 0, 255\n elif [x, y] in self.WALLS:\n col = 128, 128, 128, 255\n elif len(self.Q) <= self.getState((x, y)) or len(self.Q\n [self.getState((x, y))]) <= a:\n col = 0, 0, 0, 0\n elif self.Q[self.getState((x, y))][a] > 0:\n col = 0, 255, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.max() * 195\n elif self.Q[self.getState((x, y))][a] < 0:\n col = 255, 0, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.min() * 195\n else:\n col = 0, 0, 0, 0\n if not self.tunnel_vision or self.getState((x, y)\n ) == self.getState():\n pygame.draw.polygon(s, col, [[all_points[a][b][0] -\n x * BOX_WIDTH, all_points[a][b][1] - y *\n BOX_HEIGHT] for b in range(3)])\n self.screen.blit(s, (x * BOX_WIDTH, y * BOX_HEIGHT))\n if self.getState((x, y)) != self.WIN_STATE and [x, y\n ] not in self.WALLS:\n pygame.draw.polygon(self.screen, (255, 255, 255\n ), all_points[a], 2)\n pygame.draw.circle(self.screen, (0, 0, 255), (int((self.pos[0] + \n 0.5) * BOX_WIDTH), int((self.pos[1] + 0.5) * BOX_HEIGHT)), max(\n 10, int(BOX_WIDTH / 10)))\n pygame.display.update()\n\n def moveDir(self, pos, action):\n oldPos = list(pos)\n if action == Actions.FORWARD:\n pos[1] -= 1\n 
elif action == Actions.RIGHT:\n pos[0] += 1\n elif action == Actions.LEFT:\n pos[0] -= 1\n elif action == Actions.BACK:\n pos[1] += 1\n if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1\n ] >= GRID_HEIGHT or self.hitWall(pos):\n pos = oldPos\n return pos\n\n def hitWall(self, pos):\n for w in self.WALLS:\n if w[0] == pos[0] and w[1] == pos[1]:\n return True\n return False\n\n def getState(self, pos=False):\n if not pos:\n pos = self.pos\n return int(pos[1] * GRID_WIDTH + pos[0])\n\n def getPos(self, state):\n return [state % GRID_WIDTH, state // GRID_WIDTH]\n",
"step-2": "<mask token>\n\n\nclass MazeEnv:\n \"\"\" TODO \"\"\"\n\n def __init__(self, GW, GH, SW, SH):\n global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT\n GRID_WIDTH = GW\n GRID_HEIGHT = GH\n SCREEN_WIDTH = SW\n SCREEN_HEIGHT = SH\n BOX_WIDTH = SCREEN_WIDTH / GRID_WIDTH\n BOX_HEIGHT = SCREEN_HEIGHT / GRID_HEIGHT\n WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)\n self.pos = np.array(self.getPos(SPAWN_STATE))\n self.action_space = Actions\n self.max_states = GRID_WIDTH * GRID_HEIGHT\n self.max_actions = len(self.action_space)\n self.Q = np.zeros([GRID_WIDTH * GRID_HEIGHT, len(self.action_space)])\n self.tunnel_vision = False\n self.WALLS = list(WALLS)\n self.WIN_STATE = WIN_STATE\n self.SPAWN_STATE = SPAWN_STATE\n\n def step(self, action):\n self.pos = self.moveDir(self.pos, self.action_space(action))\n reward = -0.04\n done = True\n if self.getState() == self.WIN_STATE:\n reward = 10\n else:\n done = False\n return self.getState(), reward, done, {}\n\n def reset(self):\n self.pos = np.array(self.getPos(self.SPAWN_STATE))\n\n def render(self, screen, close=False):\n self.screen = screen\n self.screen.fill((0, 0, 0))\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n all_points = []\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT +\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / 2, y *\n BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH, y * BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / \n 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n width = 34\n height = 10\n text_offs = [[BOX_WIDTH / 2 - width / 2, height / 2], [\n BOX_WIDTH - width, BOX_HEIGHT / 2 - height / 2], [4, \n BOX_HEIGHT / 2 - height / 2], [BOX_WIDTH / 2 - width / \n 2, BOX_HEIGHT - height - 4]]\n for a in range(4):\n s = pygame.Surface((BOX_WIDTH, BOX_HEIGHT), pygame.SRCALPHA\n )\n s.fill((0, 0, 0, 0))\n if self.getState((x, y)) == self.WIN_STATE:\n col = 0, 255, 0, 255\n elif [x, y] in self.WALLS:\n col = 128, 128, 128, 255\n elif len(self.Q) <= self.getState((x, y)) or len(self.Q\n [self.getState((x, y))]) <= a:\n col = 0, 0, 0, 0\n elif self.Q[self.getState((x, y))][a] > 0:\n col = 0, 255, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.max() * 195\n elif self.Q[self.getState((x, y))][a] < 0:\n col = 255, 0, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.min() * 195\n else:\n col = 0, 0, 0, 0\n if not self.tunnel_vision or self.getState((x, y)\n ) == self.getState():\n pygame.draw.polygon(s, col, [[all_points[a][b][0] -\n x * BOX_WIDTH, all_points[a][b][1] - y *\n BOX_HEIGHT] for b in range(3)])\n self.screen.blit(s, (x * BOX_WIDTH, y * BOX_HEIGHT))\n if self.getState((x, y)) != self.WIN_STATE and [x, y\n ] not in self.WALLS:\n pygame.draw.polygon(self.screen, (255, 255, 255\n ), all_points[a], 2)\n pygame.draw.circle(self.screen, (0, 0, 255), (int((self.pos[0] + \n 0.5) * BOX_WIDTH), int((self.pos[1] + 0.5) * BOX_HEIGHT)), max(\n 10, int(BOX_WIDTH / 10)))\n pygame.display.update()\n\n def moveDir(self, pos, action):\n oldPos = list(pos)\n if action == Actions.FORWARD:\n pos[1] -= 
1\n elif action == Actions.RIGHT:\n pos[0] += 1\n elif action == Actions.LEFT:\n pos[0] -= 1\n elif action == Actions.BACK:\n pos[1] += 1\n if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1\n ] >= GRID_HEIGHT or self.hitWall(pos):\n pos = oldPos\n return pos\n\n def hitWall(self, pos):\n for w in self.WALLS:\n if w[0] == pos[0] and w[1] == pos[1]:\n return True\n return False\n\n def getState(self, pos=False):\n if not pos:\n pos = self.pos\n return int(pos[1] * GRID_WIDTH + pos[0])\n\n def getPos(self, state):\n return [state % GRID_WIDTH, state // GRID_WIDTH]\n",
"step-3": "<mask token>\n\n\nclass Actions(Enum):\n FORWARD = 0\n RIGHT = 1\n LEFT = 2\n BACK = 3\n\n\nclass MazeEnv:\n \"\"\" TODO \"\"\"\n\n def __init__(self, GW, GH, SW, SH):\n global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT\n GRID_WIDTH = GW\n GRID_HEIGHT = GH\n SCREEN_WIDTH = SW\n SCREEN_HEIGHT = SH\n BOX_WIDTH = SCREEN_WIDTH / GRID_WIDTH\n BOX_HEIGHT = SCREEN_HEIGHT / GRID_HEIGHT\n WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)\n self.pos = np.array(self.getPos(SPAWN_STATE))\n self.action_space = Actions\n self.max_states = GRID_WIDTH * GRID_HEIGHT\n self.max_actions = len(self.action_space)\n self.Q = np.zeros([GRID_WIDTH * GRID_HEIGHT, len(self.action_space)])\n self.tunnel_vision = False\n self.WALLS = list(WALLS)\n self.WIN_STATE = WIN_STATE\n self.SPAWN_STATE = SPAWN_STATE\n\n def step(self, action):\n self.pos = self.moveDir(self.pos, self.action_space(action))\n reward = -0.04\n done = True\n if self.getState() == self.WIN_STATE:\n reward = 10\n else:\n done = False\n return self.getState(), reward, done, {}\n\n def reset(self):\n self.pos = np.array(self.getPos(self.SPAWN_STATE))\n\n def render(self, screen, close=False):\n self.screen = screen\n self.screen.fill((0, 0, 0))\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n all_points = []\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT +\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / 2, y *\n BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH, y * BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / \n 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n width = 34\n height = 10\n text_offs = [[BOX_WIDTH / 2 - width / 2, height / 2], [\n BOX_WIDTH - width, BOX_HEIGHT / 2 - height / 2], [4, \n BOX_HEIGHT / 2 - height / 2], [BOX_WIDTH / 2 - width / \n 2, BOX_HEIGHT - height - 4]]\n for a in range(4):\n s = pygame.Surface((BOX_WIDTH, BOX_HEIGHT), pygame.SRCALPHA\n )\n s.fill((0, 0, 0, 0))\n if self.getState((x, y)) == self.WIN_STATE:\n col = 0, 255, 0, 255\n elif [x, y] in self.WALLS:\n col = 128, 128, 128, 255\n elif len(self.Q) <= self.getState((x, y)) or len(self.Q\n [self.getState((x, y))]) <= a:\n col = 0, 0, 0, 0\n elif self.Q[self.getState((x, y))][a] > 0:\n col = 0, 255, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.max() * 195\n elif self.Q[self.getState((x, y))][a] < 0:\n col = 255, 0, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.min() * 195\n else:\n col = 0, 0, 0, 0\n if not self.tunnel_vision or self.getState((x, y)\n ) == self.getState():\n pygame.draw.polygon(s, col, [[all_points[a][b][0] -\n x * BOX_WIDTH, all_points[a][b][1] - y *\n BOX_HEIGHT] for b in range(3)])\n self.screen.blit(s, (x * BOX_WIDTH, y * BOX_HEIGHT))\n if self.getState((x, y)) != self.WIN_STATE and [x, y\n ] not in self.WALLS:\n pygame.draw.polygon(self.screen, (255, 255, 255\n ), all_points[a], 2)\n pygame.draw.circle(self.screen, (0, 0, 255), (int((self.pos[0] + \n 0.5) * BOX_WIDTH), int((self.pos[1] + 0.5) * BOX_HEIGHT)), max(\n 10, int(BOX_WIDTH / 10)))\n pygame.display.update()\n\n def moveDir(self, pos, 
action):\n oldPos = list(pos)\n if action == Actions.FORWARD:\n pos[1] -= 1\n elif action == Actions.RIGHT:\n pos[0] += 1\n elif action == Actions.LEFT:\n pos[0] -= 1\n elif action == Actions.BACK:\n pos[1] += 1\n if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1\n ] >= GRID_HEIGHT or self.hitWall(pos):\n pos = oldPos\n return pos\n\n def hitWall(self, pos):\n for w in self.WALLS:\n if w[0] == pos[0] and w[1] == pos[1]:\n return True\n return False\n\n def getState(self, pos=False):\n if not pos:\n pos = self.pos\n return int(pos[1] * GRID_WIDTH + pos[0])\n\n def getPos(self, state):\n return [state % GRID_WIDTH, state // GRID_WIDTH]\n",
"step-4": "import pygame\nimport numpy as np\nimport random\nfrom enum import Enum\nfrom .config import *\n\n\nclass Actions(Enum):\n FORWARD = 0\n RIGHT = 1\n LEFT = 2\n BACK = 3\n\n\nclass MazeEnv:\n \"\"\" TODO \"\"\"\n\n def __init__(self, GW, GH, SW, SH):\n global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT\n GRID_WIDTH = GW\n GRID_HEIGHT = GH\n SCREEN_WIDTH = SW\n SCREEN_HEIGHT = SH\n BOX_WIDTH = SCREEN_WIDTH / GRID_WIDTH\n BOX_HEIGHT = SCREEN_HEIGHT / GRID_HEIGHT\n WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)\n self.pos = np.array(self.getPos(SPAWN_STATE))\n self.action_space = Actions\n self.max_states = GRID_WIDTH * GRID_HEIGHT\n self.max_actions = len(self.action_space)\n self.Q = np.zeros([GRID_WIDTH * GRID_HEIGHT, len(self.action_space)])\n self.tunnel_vision = False\n self.WALLS = list(WALLS)\n self.WIN_STATE = WIN_STATE\n self.SPAWN_STATE = SPAWN_STATE\n\n def step(self, action):\n self.pos = self.moveDir(self.pos, self.action_space(action))\n reward = -0.04\n done = True\n if self.getState() == self.WIN_STATE:\n reward = 10\n else:\n done = False\n return self.getState(), reward, done, {}\n\n def reset(self):\n self.pos = np.array(self.getPos(self.SPAWN_STATE))\n\n def render(self, screen, close=False):\n self.screen = screen\n self.screen.fill((0, 0, 0))\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n all_points = []\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH, y * BOX_HEIGHT +\n BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / 2, y *\n BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x *\n BOX_WIDTH, y * BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH +\n BOX_WIDTH / 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n all_points.append([[x * BOX_WIDTH + BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH, y *\n BOX_HEIGHT + BOX_HEIGHT], [x * BOX_WIDTH + BOX_WIDTH / \n 2, y * BOX_HEIGHT + BOX_HEIGHT / 2]])\n width = 34\n height = 10\n text_offs = [[BOX_WIDTH / 2 - width / 2, height / 2], [\n BOX_WIDTH - width, BOX_HEIGHT / 2 - height / 2], [4, \n BOX_HEIGHT / 2 - height / 2], [BOX_WIDTH / 2 - width / \n 2, BOX_HEIGHT - height - 4]]\n for a in range(4):\n s = pygame.Surface((BOX_WIDTH, BOX_HEIGHT), pygame.SRCALPHA\n )\n s.fill((0, 0, 0, 0))\n if self.getState((x, y)) == self.WIN_STATE:\n col = 0, 255, 0, 255\n elif [x, y] in self.WALLS:\n col = 128, 128, 128, 255\n elif len(self.Q) <= self.getState((x, y)) or len(self.Q\n [self.getState((x, y))]) <= a:\n col = 0, 0, 0, 0\n elif self.Q[self.getState((x, y))][a] > 0:\n col = 0, 255, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.max() * 195\n elif self.Q[self.getState((x, y))][a] < 0:\n col = 255, 0, 0, 60 + self.Q[self.getState((x, y))][a\n ] / self.Q.min() * 195\n else:\n col = 0, 0, 0, 0\n if not self.tunnel_vision or self.getState((x, y)\n ) == self.getState():\n pygame.draw.polygon(s, col, [[all_points[a][b][0] -\n x * BOX_WIDTH, all_points[a][b][1] - y *\n BOX_HEIGHT] for b in range(3)])\n self.screen.blit(s, (x * BOX_WIDTH, y * BOX_HEIGHT))\n if self.getState((x, y)) != self.WIN_STATE and [x, y\n ] not in self.WALLS:\n pygame.draw.polygon(self.screen, (255, 255, 255\n ), all_points[a], 2)\n pygame.draw.circle(self.screen, (0, 0, 255), (int((self.pos[0] + \n 0.5) * BOX_WIDTH), int((self.pos[1] + 0.5) * BOX_HEIGHT)), 
max(\n 10, int(BOX_WIDTH / 10)))\n pygame.display.update()\n\n def moveDir(self, pos, action):\n oldPos = list(pos)\n if action == Actions.FORWARD:\n pos[1] -= 1\n elif action == Actions.RIGHT:\n pos[0] += 1\n elif action == Actions.LEFT:\n pos[0] -= 1\n elif action == Actions.BACK:\n pos[1] += 1\n if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1\n ] >= GRID_HEIGHT or self.hitWall(pos):\n pos = oldPos\n return pos\n\n def hitWall(self, pos):\n for w in self.WALLS:\n if w[0] == pos[0] and w[1] == pos[1]:\n return True\n return False\n\n def getState(self, pos=False):\n if not pos:\n pos = self.pos\n return int(pos[1] * GRID_WIDTH + pos[0])\n\n def getPos(self, state):\n return [state % GRID_WIDTH, state // GRID_WIDTH]\n",
"step-5": "import pygame\nimport numpy as np\nimport random\nfrom enum import Enum\nfrom .config import *\n\nclass Actions(Enum):\n FORWARD = 0\n RIGHT = 1\n LEFT = 2\n BACK = 3\n\nclass MazeEnv():\n ''' TODO '''\n def __init__(self, GW, GH, SW, SH):\n global GRID_WIDTH, GRID_HEIGHT, SCREEN_WIDTH, SCREEN_HEIGHT, BOX_WIDTH, BOX_HEIGHT\n\n GRID_WIDTH = GW\n GRID_HEIGHT = GH\n SCREEN_WIDTH = SW\n SCREEN_HEIGHT = SH\n\n BOX_WIDTH = SCREEN_WIDTH/GRID_WIDTH\n BOX_HEIGHT = SCREEN_HEIGHT/GRID_HEIGHT\n\n WIN_STATE = random.randint(0, GRID_WIDTH * GRID_HEIGHT - 1)\n # Setup ML stuff\n self.pos = np.array(self.getPos(SPAWN_STATE))\n self.action_space = Actions\n self.max_states = GRID_WIDTH * GRID_HEIGHT\n self.max_actions = len(self.action_space)\n\n self.Q = np.zeros([GRID_WIDTH*GRID_HEIGHT, len(self.action_space)])\n\n self.tunnel_vision = False\n\n # Other\n self.WALLS = list(WALLS)\n self.WIN_STATE = WIN_STATE\n self.SPAWN_STATE = SPAWN_STATE\n\n def step(self, action):\n self.pos = self.moveDir(self.pos, self.action_space(action))\n\n reward = -0.04\n done = True\n if self.getState() == self.WIN_STATE:\n reward = 10\n else:\n done = False\n\n return (self.getState(), reward, done, {})\n\n def reset(self):\n self.pos = np.array(self.getPos(self.SPAWN_STATE))\n\n def render(self, screen, close=False):\n self.screen = screen\n self.screen.fill((0, 0, 0))\n\n # Draw the grid\n # font = pygame.font.Font(None, 22)\n for x in range(GRID_WIDTH):\n for y in range(GRID_HEIGHT):\n all_points = []\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])\n all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])\n all_points.append([[x * BOX_WIDTH, y * BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])\n all_points.append([[x * BOX_WIDTH+BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH, y * BOX_HEIGHT+BOX_HEIGHT], [x * BOX_WIDTH+BOX_WIDTH/2, y * BOX_HEIGHT+BOX_HEIGHT/2]])\n\n width = 34\n height = 10\n text_offs = [[(BOX_WIDTH/2-width/2), height/2], [BOX_WIDTH-width, BOX_HEIGHT/2-height/2], [4, BOX_HEIGHT/2-height/2], [BOX_WIDTH/2-width/2, BOX_HEIGHT-height-4]]\n\n for a in range(4):\n s = pygame.Surface((BOX_WIDTH,BOX_HEIGHT), pygame.SRCALPHA)\n s.fill((0, 0, 0, 0))\n\n if self.getState((x, y)) == self.WIN_STATE:\n col = (0, 255, 0, 255)\n elif [x, y] in self.WALLS:\n col = (128, 128, 128, 255)\n elif len(self.Q) <= self.getState((x, y)) or len(self.Q[self.getState((x, y))]) <= a:\n col = (0, 0, 0, 0)\n elif self.Q[self.getState((x, y))][a] > 0:\n col = (0, 255, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.max() * 195)\n elif self.Q[self.getState((x, y))][a] < 0:\n col = (255, 0, 0, 60 + self.Q[self.getState((x, y))][a] / self.Q.min() * 195)\n else:\n col = (0, 0, 0, 0)\n\n if not self.tunnel_vision or self.getState((x, y)) == self.getState():\n pygame.draw.polygon(s, col, [[all_points[a][b][0]-x*BOX_WIDTH, all_points[a][b][1]-y*BOX_HEIGHT] for b in range(3)])\n self.screen.blit(s, (x*BOX_WIDTH, y*BOX_HEIGHT))\n\n if self.getState((x, y)) != self.WIN_STATE and [x, y] not in self.WALLS:\n pygame.draw.polygon(self.screen, (255, 255, 255), all_points[a], 2)\n\n #if BOX_WIDTH > 80:\n #trender = font.render(\"{0:.2f}\".format(self.Q[self.getState((x, y)), a]), True, (255, 255, 255))\n #self.screen.blit(trender, 
(x*BOX_WIDTH+text_offs[a][0], y*BOX_HEIGHT+text_offs[a][1]))\n\n # Draw the player\n pygame.draw.circle(self.screen, (0, 0, 255),\n (int((self.pos[0]+0.5)*BOX_WIDTH),\n int((self.pos[1]+0.5)*BOX_HEIGHT)),\n max(10, int(BOX_WIDTH/10)))\n\n pygame.display.update()\n\n def moveDir(self, pos, action):\n oldPos = list(pos)\n if action == Actions.FORWARD:\n pos[1] -= 1\n elif action == Actions.RIGHT:\n pos[0] += 1\n elif action == Actions.LEFT:\n pos[0] -= 1\n elif action == Actions.BACK:\n pos[1] += 1\n\n if pos[0] < 0 or pos[0] >= GRID_WIDTH or pos[1] < 0 or pos[1] >= GRID_HEIGHT \\\n or self.hitWall(pos):\n pos = oldPos\n\n return pos\n\n def hitWall(self, pos):\n for w in self.WALLS:\n if w[0] == pos[0] and w[1] == pos[1]:\n return True\n return False\n\n def getState(self, pos=False):\n if not pos:\n pos = self.pos\n\n return int(pos[1]*GRID_WIDTH+pos[0])\n\n def getPos(self, state):\n return [state % GRID_WIDTH, state // GRID_WIDTH]",
"step-ids": [
9,
10,
12,
13,
14
]
}
|
[
9,
10,
12,
13,
14
] |
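The maze entry above flattens a 2-D grid position into a single row-major state index (`getState`/`getPos`) so it can address the `Q` table of shape `[GRID_WIDTH*GRID_HEIGHT, n_actions]`. A minimal standalone sketch of that round trip; the 4x3 grid size is an assumed toy value, and the function names merely mirror the entry:

GRID_WIDTH, GRID_HEIGHT = 4, 3  # assumed toy dimensions, not from the entry

def get_state(pos):
    # Row-major flattening: the row index is scaled by the grid width.
    return int(pos[1] * GRID_WIDTH + pos[0])

def get_pos(state):
    # Inverse mapping back to [x, y].
    return [state % GRID_WIDTH, state // GRID_WIDTH]

# Every cell survives the encode/decode round trip.
assert all(get_pos(get_state([x, y])) == [x, y]
           for x in range(GRID_WIDTH) for y in range(GRID_HEIGHT))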
#!/usr/bin/env python
# NLP 100 knocks, exercise 02: interleave the characters of the two
# Japanese words "パトカー" (patrol car) and "タクシー" (taxi) to get "パタトクカシーー".


def question():
    print("02. 「パトカー」+「タクシー」=「パタトクカシーー」")
    print("「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.")


def main():
    str1 = "パトカー"
    str2 = "タクシー"
    print(''.join([x[0] + x[1] for x in zip(str1, str2)]))


if __name__ == '__main__':
    question()
    main()
normal
|
{
"blob_id": "32869a88bb59d47281249b6ebe2357328beb0359",
"index": 3572,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\n<mask token>\n",
"step-3": "def question():\n print('02. 「パトカー」+「タクシー」=「パタトクカシーー」')\n print('「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.')\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\n<mask token>\n",
"step-4": "def question():\n print('02. 「パトカー」+「タクシー」=「パタトクカシーー」')\n print('「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.')\n\n\ndef main():\n str1 = 'パトカー'\n str2 = 'タクシー'\n print(''.join([(x[0] + x[1]) for x in zip(str1, str2)]))\n\n\nif __name__ == '__main__':\n question()\n main()\n",
"step-5": "#!/usr/bin/env python\n\ndef question():\n print(\"02. 「パトカー」+「タクシー」=「パタトクカシーー」\")\n print(\"「パトカー」+「タクシー」の文字を先頭から交互に連結して文字列「パタトクカシーー」を得よ.\")\n\ndef main():\n str1 = \"パトカー\"\n str2 = \"タクシー\"\n print(''.join([x[0] + x[1] for x in zip(str1, str2)]))\n\nif __name__ == '__main__':\n question()\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
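The entry above interleaves two equal-length strings character by character with `zip`. A compact sketch of the same trick on toy ASCII strings (my own example values, not from the entry):

a, b = "ABCD", "1234"
print(''.join(x + y for x, y in zip(a, b)))  # -> A1B2C3D4
# zip() stops at the shorter input; use itertools.zip_longest to pad instead.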
from googleAPI.drive import *
class GoogleSheet(GoogleDrive):
"""
The base class of Google Sheet API.
It aims at dealing with the Google Sheet data extract and append.
It is not tied to a specific spreadsheet.
This class is powered by pandas. Thus, make sure the data in the
spreadsheet is able to be processed by pandas.
Terminology:
        Spreadsheet: The whole file. Same level as a Microsoft Excel file.
Sheet: A tab inside the spreadsheet. Same as Excel sheet.
A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of
cells in the spreadsheet, and is typically used in formulas.
https://developers.google.com/sheets/api/guides/concepts#a1_notation
"""
def __init__(
self,
creds=None,
credential_path="",
credential_scopes=["https://www.googleapis.com/auth/drive"],
token_prefix="GoogleDrive_",
token_suffix="",
):
"""
Initialize the credential.
If credential `creds` is provided, this method will use it directly
if it is valid.
Otherwise, it will use `credential_path` and `credential_scopes` to
get the token.
Args:
creds: None or google.oauth2.credentials.Credentials, default None
credential_path: String, default ''
Path to the credential with either 'token.pickle' or
'credentials.json' in it.
credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']
                Scope of the credential. The default scope grants
                'See, edit, create, and delete all of your Google Drive files'.
Details:
https://developers.google.com/identity/protocols/oauth2/scopes#sheets
token_prefix: String, default 'GoogleDrive_'
Prefix of token file. eg. '{token_prefix}token.pickle'.
token_suffix: String, default ''
Suffix of token file. eg. 'token{token_suffix}.pickle'.
"""
if creds is not None and self.credential_validation(creds):
self.creds = creds
else:
self.creds = self.credential(
credential_path, credential_scopes, token_prefix, token_suffix
)
def create_spreadsheet(self, spreadsheet_name: str):
"""
Creates a spreadsheet, returning the newly created spreadsheet's ID.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create
Args:
spreadsheet_name: String
The name of the spreadsheet.
Return:
spreadsheet ID: String
"""
service = build("sheets", "v4", credentials=self.creds)
spreadsheet = {"properties": {"title": spreadsheet_name}}
spreadsheet = (
service.spreadsheets()
.create(body=spreadsheet, fields="spreadsheetId")
.execute()
)
return spreadsheet.get("spreadsheetId")
def search_spreadsheet(self, spreadsheet_name: str):
"""
        Search for the spreadsheet in Google Drive and return the spreadsheet ID.
Since it is using Google Drive API, the scope must include reading
files in Google Drive.
If you want customized query, use `GoogleDrive.search_file()` instead.
Args:
spreadsheet_name: String
The name of the spreadsheet. There is no file extension.
Return:
Dictionary.
Key: Spreadsheet name.
Value: List of spreadsheet ID in case there are duplicate file names.
"""
result = self.search_file(file_name=spreadsheet_name)
return result
def get_spreadsheet_property(self, spreadsheet_id: str):
"""
Get spreadsheet property and sheet property.
Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.
Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get
Args:
spreadsheet_id: String
Spreadsheet ID.
Return:
Tuple: (spreadsheet_property, sheet_property)
spreadsheet_property: Dictionary
The entire spreadsheet property. It is the superset of the sheet property.
Structure of the response:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
sheet_property: Dictionary
sheetId: Dictionary, key: sheet name, value: sheet ID
The unique ID of each sheet regardless of position.
sheetIndex: Dictionary, key: sheet name, value: sheet index
The position of the sheet starting from 0.
                sheetRowCount: Dictionary, key: sheet name, value: sheet row count
                    The number of rows in the sheet. Note that this is not the number
                    of rows that contain data; it is the boundary of the sheet.
                sheetColCount: Dictionary, key: sheet name, value: sheet column count
                    The number of columns in the sheet. Note that this is not the number
                    of columns that contain data; it is the boundary of the sheet.
"""
service = build("sheets", "v4", credentials=self.creds)
request = service.spreadsheets().get(
spreadsheetId=spreadsheet_id, includeGridData=False
)
# Spreadsheet property
spreadsheet_property = request.execute()
# Sheet property
sheetId = {
d["properties"]["title"]: d["properties"]["sheetId"]
for d in spreadsheet_property["sheets"]
}
sheetIndex = {
d["properties"]["title"]: d["properties"]["index"]
for d in spreadsheet_property["sheets"]
}
sheetRowCount = {
d["properties"]["title"]: d["properties"]["gridProperties"]["rowCount"]
for d in spreadsheet_property["sheets"]
}
sheetColCount = {
d["properties"]["title"]: d["properties"]["gridProperties"]["columnCount"]
for d in spreadsheet_property["sheets"]
}
sheet_property = {
"sheetId": sheetId,
"sheetIndex": sheetIndex,
"sheetRowCount": sheetRowCount,
"sheetColCount": sheetColCount,
}
return spreadsheet_property, sheet_property
def download_spreadsheet(self, spreadsheet_id: str, save_as=""):
"""
        Download the spreadsheet given the spreadsheet ID
and return a file pointer or save it as a file.
Supported file formats: .xlsx, .csv, .pdf.
        For unsupported file formats (i.e. Open Office sheet,
        single sheet only, and HTML), use `GoogleDrive.download_file()`.
Official API guide:
https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive
Args:
spreadsheet_id: String
The spreadsheet ID.
save_as: String, default ''
'': Return a file pointer.
'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.
'CSV': Save as '{Spreadsheet name}.csv'. Return None.
First sheet only.
'PDF': Save as '{Spreadsheet name}.pdf'. Return None.
'*.xlsx': Save as '*.xlsx'. Return None.
'*.csv': Save as '*.csv'. Return None.
'*.pdf': Save as '*.pdf'. Return None.
Return:
None or file pointer depending on the `save_as`
"""
spreadsheet_name = self.get_file_metadata(
file_id=spreadsheet_id, fields="name"
)["name"]
mimeType = {
"Excel": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"Open Office sheet": "application/x-vnd.oasis.opendocument.spreadsheet",
"PDF": "application/pdf",
"CSV": "text/csv",
}
if save_as == "":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["Excel"]
)
elif save_as == "Excel":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["Excel"],
save_as="{0}.xlsx".format(spreadsheet_name),
)
elif save_as == "CSV":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["CSV"],
save_as="{0}.csv".format(spreadsheet_name),
)
elif save_as == "PDF":
result = self.download_file(
file_id=spreadsheet_id,
mimeType=mimeType["PDF"],
save_as="{0}.pdf".format(spreadsheet_name),
)
elif save_as[-5:] == ".xlsx":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["Excel"], save_as=save_as
)
elif save_as[-4:] == ".csv":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["CSV"], save_as=save_as
)
elif save_as[-4:] == ".pdf":
result = self.download_file(
file_id=spreadsheet_id, mimeType=mimeType["PDF"], save_as=save_as
)
else:
raise Exception(
textwrap.dedent(
"""\
{0} is not a supported file format.
Please check the `GoogleSheet.download_spreadsheet()` docstring.
Or you may want to use `GoogleDrive.download_file()` method.\
""".format(
save_as
)
)
)
return result
def get_values(
self,
spreadsheet_id: str,
range_,
value_render_option=None,
date_time_render_option=None,
):
"""
Get a single value, a range of values, and several ranges of values.
Use `GoogleSheet.download_spreadsheet()` if you want to get the
entire spreadsheet.
Official API guide:
For single range:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
For multiple ranges:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
Example:
Get the entire sheet of `Sheet 1`.
>>> gs.get_values(spreadsheet_id, "'Sheet 1'")
Get the value of cell `A5` in `Sheet 1`.
>>> gs.get_values(spreadsheet_id, "'Sheet 1'!A5")
Args:
spreadsheet_id: String
range_: String or List of strings in A1 notation
String: A single sheet, A single range
List of strings: Several ranges
value_render_option: String, default None
How values should be represented in the output.
The default render option is `ValueRenderOption.FORMATTED_VALUE`.
Details:
https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
date_time_render_option: String, default None
How dates, times, and durations should be represented in the output.
Details:
https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption
Return:
ValueRange in Dictionary
Details:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange
"""
service = build("sheets", "v4", credentials=self.creds)
        # How values should be represented in the output.
        # The default render option is ValueRenderOption.FORMATTED_VALUE.
        # How dates, times, and durations should be represented in the output.
        # This is ignored if value_render_option is FORMATTED_VALUE.
        # The default dateTime render option is DateTimeRenderOption.SERIAL_NUMBER.
request = (
service.spreadsheets()
.values()
.batchGet(
spreadsheetId=spreadsheet_id,
ranges=range_,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
)
result = request.execute()
return result
def clear_values(self, spreadsheet_id: str, range_):
"""
Clear values from a spreadsheet.
        Only values are cleared -- all other properties of
        the cell (such as formatting, data validation, etc.) are kept.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear
Args:
spreadsheet_id: String
range_: String, A1 notation
Return:
Dictionary, cleared range
{
"spreadsheetId": string,
"clearedRange": string
}
"""
service = build("sheets", "v4", credentials=self.creds)
        batch_clear_values_request_body = {
            # The ranges to clear, in A1 notation.
            "ranges": range_
        }
request = (
service.spreadsheets()
.values()
.batchClear(
spreadsheetId=spreadsheet_id, body=batch_clear_values_request_body
)
)
result = request.execute()
return result
def update_values(self, spreadsheet_id: str, data, value_input_option="RAW"):
"""
Sets values in a range of a spreadsheet.
Official API guide:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
Args:
spreadsheet_id: String
data: ValueRange in Dictionary
{
"range": string,
"majorDimension": enum (Dimension),
"values": [
array
]
}
Details:
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange
Return:
Dictionary in structure:
{
"spreadsheetId": string,
"totalUpdatedRows": integer,
"totalUpdatedColumns": integer,
"totalUpdatedCells": integer,
"totalUpdatedSheets": integer,
"responses": [
{
object (UpdateValuesResponse)
}
]
}
"""
service = build("sheets", "v4", credentials=self.creds)
        batch_update_values_request_body = {
            # How the input data should be interpreted: 'RAW' or 'USER_ENTERED'.
            # Note: the REST body field is camelCase ('valueInputOption').
            "valueInputOption": value_input_option,
            # The new values to apply to the spreadsheet.
            "data": data,
        }
request = (
service.spreadsheets()
.values()
.batchUpdate(
spreadsheetId=spreadsheet_id, body=batch_update_values_request_body
)
)
result = request.execute()
return result
def update_column_format(self):
"""
Update the column format.
        Intended to support: date, number, currency. Not implemented yet.
        Official API guide:
https://developers.google.com/sheets/api/samples/formatting
https://developers.google.com/sheets/api/guides/formats
https://developers.google.com/sheets/api/guides/batchupdate
"""
pass
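
# A minimal usage sketch of the class above (not part of the entry; the
# spreadsheet name, range, and values are hypothetical, and a valid Google
# credential is assumed to be available at `credential_path`):
#
#   gs = GoogleSheet(credential_path=".")
#   sid = gs.create_spreadsheet("demo")
#   gs.update_values(
#       sid,
#       data=[{"range": "Sheet1!A1:B2",
#              "majorDimension": "ROWS",
#              "values": [["a", "b"], [1, 2]]}],
#       value_input_option="USER_ENTERED",
#   )
#   print(gs.get_values(sid, "Sheet1!A1:B2"))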
|
normal
|
{
"blob_id": "9e793bd0faef65dfe8ac4b722e50d2055837449f",
"index": 4701,
"step-1": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n <mask token>\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n <mask token>\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n <mask token>\n <mask token>\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) 
are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. 
It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n <mask token>\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n <mask token>\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-3": "<mask token>\n\n\nclass GoogleSheet(GoogleDrive):\n <mask token>\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. 
It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n\n def get_values(self, spreadsheet_id: str, range_, value_render_option=\n None, date_time_render_option=None):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n value_render_option = value_render_option\n date_time_render_option = date_time_render_option\n request = service.spreadsheets().values().batchGet(spreadsheetId=\n spreadsheet_id, ranges=range_, valueRenderOption=\n value_render_option, dateTimeRenderOption=date_time_render_option)\n result = request.execute()\n return result\n\n def clear_values(self, 
spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option='RAW'\n ):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_update_values_request_body = {'value_input_option':\n value_input_option, 'data': data}\n request = service.spreadsheets().values().batchUpdate(spreadsheetId\n =spreadsheet_id, body=batch_update_values_request_body)\n result = request.execute()\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-4": "from googleAPI.drive import *\n\n\nclass GoogleSheet(GoogleDrive):\n \"\"\"\n The base class of Google Sheet API.\n \n It aims at dealing with the Google Sheet data extract and append.\n It is not tied to a specific spreadsheet.\n \n This class is powered by pandas. Thus, make sure the data in the\n spreadsheet is able to be processed by pandas.\n \n Terminology:\n Spreadsheet: The whole file. Same level as an Microsoft Excel file.\n Sheet: A tab inside the spreadsheet. Same as Excel sheet.\n A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of \n cells in the spreadsheet, and is typically used in formulas.\n https://developers.google.com/sheets/api/guides/concepts#a1_notation\n \"\"\"\n\n def __init__(self, creds=None, credential_path='', credential_scopes=[\n 'https://www.googleapis.com/auth/drive'], token_prefix=\n 'GoogleDrive_', token_suffix=''):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(credential_path, credential_scopes,\n token_prefix, token_suffix)\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n spreadsheet = {'properties': {'title': spreadsheet_name}}\n spreadsheet = service.spreadsheets().create(body=spreadsheet,\n fields='spreadsheetId').execute()\n return spreadsheet.get('spreadsheetId')\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. 
There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n request = service.spreadsheets().get(spreadsheetId=spreadsheet_id,\n includeGridData=False)\n spreadsheet_property = request.execute()\n sheetId = {d['properties']['title']: d['properties']['sheetId'] for\n d in spreadsheet_property['sheets']}\n sheetIndex = {d['properties']['title']: d['properties']['index'] for\n d in spreadsheet_property['sheets']}\n sheetRowCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['rowCount'] for d in spreadsheet_property[\n 'sheets']}\n sheetColCount = {d['properties']['title']: d['properties'][\n 'gridProperties']['columnCount'] for d in spreadsheet_property[\n 'sheets']}\n sheet_property = {'sheetId': sheetId, 'sheetIndex': sheetIndex,\n 'sheetRowCount': sheetRowCount, 'sheetColCount': sheetColCount}\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=''):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(file_id=spreadsheet_id,\n fields='name')['name']\n mimeType = {'Excel':\n 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n , 'Open Office sheet':\n 'application/x-vnd.oasis.opendocument.spreadsheet', 'PDF':\n 'application/pdf', 'CSV': 'text/csv'}\n if save_as == '':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'])\n elif save_as == 'Excel':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as='{0}.xlsx'.format(spreadsheet_name))\n elif save_as == 'CSV':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as='{0}.csv'.format(spreadsheet_name))\n elif save_as == 'PDF':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as='{0}.pdf'.format(spreadsheet_name))\n elif save_as[-5:] == '.xlsx':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['Excel'], save_as=save_as)\n elif save_as[-4:] == '.csv':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['CSV'], save_as=save_as)\n elif save_as[-4:] == '.pdf':\n result = self.download_file(file_id=spreadsheet_id, mimeType=\n mimeType['PDF'], save_as=save_as)\n else:\n raise Exception(textwrap.dedent(\n \"\"\" {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method. \"\"\"\n .format(save_as)))\n return result\n\n def get_values(self, spreadsheet_id: str, range_, value_render_option=\n None, date_time_render_option=None):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n value_render_option = value_render_option\n date_time_render_option = date_time_render_option\n request = service.spreadsheets().values().batchGet(spreadsheetId=\n spreadsheet_id, ranges=range_, valueRenderOption=\n value_render_option, dateTimeRenderOption=date_time_render_option)\n result = request.execute()\n return result\n\n def clear_values(self, 
spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_clear_values_request_body = {'ranges': range_}\n request = service.spreadsheets().values().batchClear(spreadsheetId=\n spreadsheet_id, body=batch_clear_values_request_body)\n result = request.execute()\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option='RAW'\n ):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build('sheets', 'v4', credentials=self.creds)\n batch_update_values_request_body = {'value_input_option':\n value_input_option, 'data': data}\n request = service.spreadsheets().values().batchUpdate(spreadsheetId\n =spreadsheet_id, body=batch_update_values_request_body)\n result = request.execute()\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-5": "from googleAPI.drive import *\n\n\nclass GoogleSheet(GoogleDrive):\n \"\"\"\n The base class of Google Sheet API.\n \n It aims at dealing with the Google Sheet data extract and append.\n It is not tied to a specific spreadsheet.\n \n This class is powered by pandas. Thus, make sure the data in the\n spreadsheet is able to be processed by pandas.\n \n Terminology:\n Spreadsheet: The whole file. Same level as an Microsoft Excel file.\n Sheet: A tab inside the spreadsheet. Same as Excel sheet.\n A1 notation: A string like `Sheet1!A1:B2`, that refers to a group of \n cells in the spreadsheet, and is typically used in formulas.\n https://developers.google.com/sheets/api/guides/concepts#a1_notation\n \"\"\"\n\n def __init__(\n self,\n creds=None,\n credential_path=\"\",\n credential_scopes=[\"https://www.googleapis.com/auth/drive\"],\n token_prefix=\"GoogleDrive_\",\n token_suffix=\"\",\n ):\n \"\"\"\n Initialize the credential.\n \n If credential `creds` is provided, this method will use it directly \n if it is valid.\n \n Otherwise, it will use `credential_path` and `credential_scopes` to\n get the token.\n \n Args:\n creds: None or google.oauth2.credentials.Credentials, default None\n credential_path: String, default ''\n Path to the credential with either 'token.pickle' or\n 'credentials.json' in it.\n credential_scopes: List of strings, default ['https://www.googleapis.com/auth/drive']\n Scope of the credential. Default scope can\n 'See, edit, create, and delete all of your Google Drive files'.\n Details:\n https://developers.google.com/identity/protocols/oauth2/scopes#sheets\n token_prefix: String, default 'GoogleDrive_'\n Prefix of token file. eg. '{token_prefix}token.pickle'.\n token_suffix: String, default ''\n Suffix of token file. eg. 'token{token_suffix}.pickle'.\n \"\"\"\n if creds is not None and self.credential_validation(creds):\n self.creds = creds\n else:\n self.creds = self.credential(\n credential_path, credential_scopes, token_prefix, token_suffix\n )\n\n def create_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Creates a spreadsheet, returning the newly created spreadsheet's ID.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/create\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet.\n \n Return:\n spreadsheet ID: String\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n spreadsheet = {\"properties\": {\"title\": spreadsheet_name}}\n spreadsheet = (\n service.spreadsheets()\n .create(body=spreadsheet, fields=\"spreadsheetId\")\n .execute()\n )\n return spreadsheet.get(\"spreadsheetId\")\n\n def search_spreadsheet(self, spreadsheet_name: str):\n \"\"\"\n Searche for the spreadsheet in Google Drive and return the spreadsheet ID.\n \n Since it is using Google Drive API, the scope must include reading\n files in Google Drive.\n \n If you want customized query, use `GoogleDrive.search_file()` instead.\n \n Args:\n spreadsheet_name: String\n The name of the spreadsheet. 
There is no file extension.\n \n Return:\n Dictionary.\n Key: Spreadsheet name.\n Value: List of spreadsheet ID in case there are duplicate file names.\n \"\"\"\n result = self.search_file(file_name=spreadsheet_name)\n return result\n\n def get_spreadsheet_property(self, spreadsheet_id: str):\n \"\"\"\n Get spreadsheet property and sheet property.\n \n Spreadsheet property includes the title, locale, timeZone, defaultFormat, etc.\n \n Sheet property includes sheetID, sheetIndex, sheetRowCount, and sheetColCount.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets/get\n \n Args:\n spreadsheet_id: String\n Spreadsheet ID.\n \n Return:\n Tuple: (spreadsheet_property, sheet_property)\n spreadsheet_property: Dictionary\n The entire spreadsheet property. It is the superset of the sheet property.\n Structure of the response:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet\n sheet_property: Dictionary\n sheetId: Dictionary, key: sheet name, value: sheet ID\n The unique ID of each sheet regardless of position.\n sheetIndex: Dictionary, key: sheet name, value: sheet index\n The position of the sheet starting from 0.\n sheetRowCount: Dictionary, key: sheet name, value: sheet row count\n The numbder of rows in sheet. Note that this is not the number of \n rows that contains data.It is the boundary of the sheet.\n sheetColCount: Dictionary, key: sheet name, value: sheet column count\n The numbder of columns in sheet. Note that this is not the number \n of columns that contains data.It is the boundary of the sheet.\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n request = service.spreadsheets().get(\n spreadsheetId=spreadsheet_id, includeGridData=False\n )\n # Spreadsheet property\n spreadsheet_property = request.execute()\n\n # Sheet property\n sheetId = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"sheetId\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetIndex = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"index\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetRowCount = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"gridProperties\"][\"rowCount\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheetColCount = {\n d[\"properties\"][\"title\"]: d[\"properties\"][\"gridProperties\"][\"columnCount\"]\n for d in spreadsheet_property[\"sheets\"]\n }\n sheet_property = {\n \"sheetId\": sheetId,\n \"sheetIndex\": sheetIndex,\n \"sheetRowCount\": sheetRowCount,\n \"sheetColCount\": sheetColCount,\n }\n return spreadsheet_property, sheet_property\n\n def download_spreadsheet(self, spreadsheet_id: str, save_as=\"\"):\n \"\"\"\n Download the spreadsheet by given the spreadsheet ID\n and return a file pointer or save it as a file.\n \n Supported file formats: .xlsx, .csv, .pdf.\n For unsupported file formats i.e. Open Office sheet,\n sheet only, and HTML, use `GoogleDrive.download_file()`.\n \n Official API guide:\n https://developers.google.com/drive/api/v3/manage-downloads#download_a_file_stored_on_google_drive\n \n Args:\n spreadsheet_id: String\n The spreadsheet ID.\n save_as: String, default ''\n '': Return a file pointer.\n 'Excel': Save as '{Spreadsheet name}.xlsx'. Return None.\n 'CSV': Save as '{Spreadsheet name}.csv'. Return None.\n First sheet only.\n 'PDF': Save as '{Spreadsheet name}.pdf'. Return None.\n '*.xlsx': Save as '*.xlsx'. Return None.\n '*.csv': Save as '*.csv'. Return None.\n '*.pdf': Save as '*.pdf'. 
Return None.\n\n Return:\n None or file pointer depending on the `save_as`\n \"\"\"\n spreadsheet_name = self.get_file_metadata(\n file_id=spreadsheet_id, fields=\"name\"\n )[\"name\"]\n mimeType = {\n \"Excel\": \"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\",\n \"Open Office sheet\": \"application/x-vnd.oasis.opendocument.spreadsheet\",\n \"PDF\": \"application/pdf\",\n \"CSV\": \"text/csv\",\n }\n\n if save_as == \"\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"Excel\"]\n )\n elif save_as == \"Excel\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"Excel\"],\n save_as=\"{0}.xlsx\".format(spreadsheet_name),\n )\n elif save_as == \"CSV\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"CSV\"],\n save_as=\"{0}.csv\".format(spreadsheet_name),\n )\n elif save_as == \"PDF\":\n result = self.download_file(\n file_id=spreadsheet_id,\n mimeType=mimeType[\"PDF\"],\n save_as=\"{0}.pdf\".format(spreadsheet_name),\n )\n elif save_as[-5:] == \".xlsx\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"Excel\"], save_as=save_as\n )\n elif save_as[-4:] == \".csv\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"CSV\"], save_as=save_as\n )\n elif save_as[-4:] == \".pdf\":\n result = self.download_file(\n file_id=spreadsheet_id, mimeType=mimeType[\"PDF\"], save_as=save_as\n )\n else:\n raise Exception(\n textwrap.dedent(\n \"\"\"\\\n {0} is not a supported file format.\n Please check the `GoogleSheet.download_spreadsheet()` docstring.\n Or you may want to use `GoogleDrive.download_file()` method.\\\n \"\"\".format(\n save_as\n )\n )\n )\n return result\n\n def get_values(\n self,\n spreadsheet_id: str,\n range_,\n value_render_option=None,\n date_time_render_option=None,\n ):\n \"\"\"\n Get a single value, a range of values, and several ranges of values.\n \n Use `GoogleSheet.download_spreadsheet()` if you want to get the\n entire spreadsheet.\n \n Official API guide:\n For single range:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get\n For multiple ranges:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet\n \n Example:\n Get the entire sheet of `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'\")\n\n Get the value of cell `A5` in `Sheet 1`.\n >>> gs.get_values(spreadsheet_id, \"'Sheet 1'!A5\")\n\n Args:\n spreadsheet_id: String\n range_: String or List of strings in A1 notation\n String: A single sheet, A single range\n List of strings: Several ranges\n value_render_option: String, default None\n How values should be represented in the output.\n The default render option is `ValueRenderOption.FORMATTED_VALUE`.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n date_time_render_option: String, default None\n How dates, times, and durations should be represented in the output.\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/ValueRenderOption\n\n Return:\n ValueRange in Dictionary\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#resource:-valuerange\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n\n # How values should be represented in the output.\n # The default render option is ValueRenderOption.FORMATTED_VALUE.\n value_render_option = value_render_option\n\n # How dates, times, and durations should be represented in 
the output.\n # This is ignored if value_render_option is\n # FORMATTED_VALUE.\n # The default dateTime render option is [DateTimeRenderOption.SERIAL_NUMBER].\n date_time_render_option = date_time_render_option\n\n request = (\n service.spreadsheets()\n .values()\n .batchGet(\n spreadsheetId=spreadsheet_id,\n ranges=range_,\n valueRenderOption=value_render_option,\n dateTimeRenderOption=date_time_render_option,\n )\n )\n result = request.execute()\n\n return result\n\n def clear_values(self, spreadsheet_id: str, range_):\n \"\"\"\n Clear values from a spreadsheet.\n \n Only values are cleared -- all other properties of \n the cell (such as formatting, data validation, etc..) are kept.\n \n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear\n \n Args:\n spreadsheet_id: String\n range_: String, A1 notation\n\n Return:\n Dictionary, cleared range\n {\n \"spreadsheetId\": string,\n \"clearedRange\": string\n }\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n\n batch_clear_values_request_body = {\n # The ranges to clear, in A1 notation.\n \"ranges\": range_\n # TODO: Add desired entries to the request body.\n }\n\n request = (\n service.spreadsheets()\n .values()\n .batchClear(\n spreadsheetId=spreadsheet_id, body=batch_clear_values_request_body\n )\n )\n result = request.execute()\n\n return result\n\n def update_values(self, spreadsheet_id: str, data, value_input_option=\"RAW\"):\n \"\"\"\n Sets values in a range of a spreadsheet.\n\n Official API guide:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update\n\n Args:\n spreadsheet_id: String\n data: ValueRange in Dictionary\n {\n \"range\": string,\n \"majorDimension\": enum (Dimension),\n \"values\": [\n array\n ]\n }\n Details:\n https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values#ValueRange\n \n Return:\n Dictionary in structure:\n {\n \"spreadsheetId\": string,\n \"totalUpdatedRows\": integer,\n \"totalUpdatedColumns\": integer,\n \"totalUpdatedCells\": integer,\n \"totalUpdatedSheets\": integer,\n \"responses\": [\n {\n object (UpdateValuesResponse)\n }\n ]\n }\n \"\"\"\n service = build(\"sheets\", \"v4\", credentials=self.creds)\n batch_update_values_request_body = {\n # How the input data should be interpreted.\n \"value_input_option\": value_input_option, # 'USER_ENTERED'\n # The new values to apply to the spreadsheet.\n \"data\": data,\n }\n\n request = (\n service.spreadsheets()\n .values()\n .batchUpdate(\n spreadsheetId=spreadsheet_id, body=batch_update_values_request_body\n )\n )\n result = request.execute()\n\n return result\n\n def update_column_format(self):\n \"\"\"\n Update the column format.\n \n Supported format: date, number, currency\n \n Officail API guide:\n https://developers.google.com/sheets/api/samples/formatting\n https://developers.google.com/sheets/api/guides/formats\n https://developers.google.com/sheets/api/guides/batchupdate\n \"\"\"\n pass\n",
"step-ids": [
4,
8,
10,
12,
13
]
}
|
[
4,
8,
10,
12,
13
] |
''' This module creates the models/tables in the database
catalog using sqlalchemy '''
from catalog import db
class Items(db.Model):
''' Model to store all the information about an item '''
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String)
item = db.Column(db.String, nullable=False)
description = db.Column(db.String, nullable=False)
image = db.Column(db.String)
category = db.Column(db.String, nullable=False)
price = db.Column(db.String, nullable=False)
def __init__(self, email, item, description, image, category, price):
self.email = email
self.item = item
self.description = description
self.image = image
self.category = category
self.price = price
@property
def serialize(self):
''' Function to return a json object for each
instance of the class Items '''
return { 'id': self.id,
'item': self.item,
'description': self.description,
'image': self.image,
'category': self.category,
'price': self.price }
def __repr__(self):
''' Functon to represent the class instance '''
return '<item {}>'.format(self.item)
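An illustrative usage sketch for the model above. It assumes `catalog` exposes a configured Flask-SQLAlchemy `db` (as the import implies) and that this file is importable as `catalog.models`; the module path and all field values are placeholders.

from catalog import db
from catalog.models import Items  # assumed module path for the class above

# On Flask-SQLAlchemy 3.x the calls below need an application context,
# e.g. wrap them in `with app.app_context():`.
db.create_all()  # create the items table if it does not exist yet

row = Items(email='[email protected]', item='Bat', description='Wooden bat',
            image='/static/bat.png', category='Sports', price='29.99')
db.session.add(row)
db.session.commit()

print(row.serialize)  # JSON-ready dict, e.g. for jsonify()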
|
normal
|
{
"blob_id": "ad622ff2e1d9286246b2175694a9ae796f8d2557",
"index": 7535,
"step-1": "<mask token>\n\n\nclass Items(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Items(db.Model):\n <mask token>\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-3": "<mask token>\n\n\nclass Items(db.Model):\n \"\"\" Model to store all the information about an item \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-4": "<mask token>\nfrom catalog import db\n\n\nclass Items(db.Model):\n \"\"\" Model to store all the information about an item \"\"\"\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String)\n item = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n image = db.Column(db.String)\n category = db.Column(db.String, nullable=False)\n price = db.Column(db.String, nullable=False)\n\n def __init__(self, email, item, description, image, category, price):\n self.email = email\n self.item = item\n self.description = description\n self.image = image\n self.category = category\n self.price = price\n\n @property\n def serialize(self):\n \"\"\" Function to return a json object for each \n instance of the class Items \"\"\"\n return {'id': self.id, 'item': self.item, 'description': self.\n description, 'image': self.image, 'category': self.category,\n 'price': self.price}\n\n def __repr__(self):\n \"\"\" Functon to represent the class instance \"\"\"\n return '<item {}>'.format(self.item)\n",
"step-5": "''' This module creates the models/tables in the database \r\n catalog using sqlalchemy '''\r\n\r\nfrom catalog import db\r\n\r\n\r\nclass Items(db.Model):\r\n ''' Model to store all the information about an item '''\r\n\r\n id = db.Column(db.Integer, primary_key=True)\r\n email = db.Column(db.String)\r\n item = db.Column(db.String, nullable=False)\r\n description = db.Column(db.String, nullable=False)\r\n image = db.Column(db.String)\r\n category = db.Column(db.String, nullable=False)\r\n price = db.Column(db.String, nullable=False)\r\n\r\n\r\n def __init__(self, email, item, description, image, category, price):\r\n\r\n self.email = email\r\n self.item = item\r\n self.description = description\r\n self.image = image\r\n self.category = category\r\n self.price = price\r\n\r\n\r\n @property\r\n def serialize(self):\r\n ''' Function to return a json object for each \r\n instance of the class Items '''\r\n\r\n return { 'id': self.id,\r\n 'item': self.item,\r\n 'description': self.description,\r\n 'image': self.image,\r\n 'category': self.category,\r\n 'price': self.price }\r\n\r\n\r\n def __repr__(self):\r\n ''' Functon to represent the class instance '''\r\n\r\n return '<item {}>'.format(self.item)\r\n",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class _WATERWAYS:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class _WATERWAYS:
def __init__(self):
self.name = 'WATERWAYS'
self.definitions = waterway
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waterway']
<|reserved_special_token_1|>
#class header
class _WATERWAYS():
def __init__(self,):
self.name = "WATERWAYS"
self.definitions = waterway
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waterway']
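Note that `__init__` above dereferences a module-level name `waterway` that the blob never defines, so instantiating `_WATERWAYS` as written raises NameError. A minimal sketch that supplies a placeholder first:

# Placeholder for the missing module-level definitions value (assumption).
waterway = ['a river, canal, or other route for travel by water']

w = _WATERWAYS()
print(w.name, w.basic)  # WATERWAYS ['waterway']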
|
flexible
|
{
"blob_id": "33daf5753b27f6b4bcb7c98e28cf2168e7f0b403",
"index": 9541,
"step-1": "<mask token>\n",
"step-2": "class _WATERWAYS:\n <mask token>\n",
"step-3": "class _WATERWAYS:\n\n def __init__(self):\n self.name = 'WATERWAYS'\n self.definitions = waterway\n self.parents = []\n self.childen = []\n self.properties = []\n self.jsondata = {}\n self.basic = ['waterway']\n",
"step-4": "\n\n#calss header\nclass _WATERWAYS():\n\tdef __init__(self,): \n\t\tself.name = \"WATERWAYS\"\n\t\tself.definitions = waterway\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.basic = ['waterway']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_weather(request):
form = WeatherForm()
error = ''
output = {}
if request.method == 'POST':
form = WeatherForm(request.POST)
if form.is_valid():
data = form.cleaned_data
latitude = data['latitude']
longitude = data['longitude']
url = settings.WEATHER_URL
url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (
latitude, longitude)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
output = response.json()
else:
error = response.text
return render(request=request, template_name='core/weather.html',
context={'form': form, 'error': error, 'output': output})
<|reserved_special_token_1|>
from django.http.response import HttpResponse
from django.shortcuts import render, HttpResponse
import requests
from django.conf import settings
from .forms import WeatherForm
def get_weather(request):
form = WeatherForm()
error = ''
output = {}
if request.method == 'POST':
form = WeatherForm(request.POST)
if form.is_valid():
data = form.cleaned_data
latitude = data['latitude']
longitude = data['longitude']
url = settings.WEATHER_URL
url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (
latitude, longitude)
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}
response = requests.get(url, headers=headers)
if response.status_code == 200:
output = response.json()
else:
error = response.text
return render(request=request, template_name='core/weather.html',
context={'form': form, 'error': error, 'output': output})
<|reserved_special_token_1|>
from django.http.response import HttpResponse
from django.shortcuts import render , HttpResponse
import requests
from django.conf import settings
from .forms import WeatherForm
# Create your views here.
def get_weather(request):
form = WeatherForm()
error = ""
output = {}
if request.method == 'POST':
form = WeatherForm(request.POST)
if form.is_valid():
data = form.cleaned_data
latitude = data['latitude']
longitude = data['longitude']
url = settings.WEATHER_URL
url += "weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s"%(latitude,longitude)
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64;"}
response = requests.get(url,headers=headers)
if response.status_code == 200:
output = response.json()
else:
error = response.text
return render(request=request,template_name="core/weather.html", context= {'form':form ,
'error':error , "output":output})
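A hedged sketch of the wiring this view implies: a `WeatherForm` exposing the `latitude`/`longitude` fields it reads, a `WEATHER_URL` setting (the path built in the view matches api.met.no's locationforecast 2.0 endpoint), and a URL route. All module paths are assumptions.

# core/forms.py (assumed location) - the two fields the view pulls out
from django import forms

class WeatherForm(forms.Form):
    latitude = forms.FloatField()
    longitude = forms.FloatField()

# settings.py - base URL the view prepends; met.no hosts this API path
WEATHER_URL = 'https://api.met.no/'

# core/urls.py (assumed location) - route the form posts back to
from django.urls import path
from .views import get_weather

urlpatterns = [path('weather/', get_weather, name='get_weather')]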
|
flexible
|
{
"blob_id": "be5a683309317f1f6ebc20ad3511fd2b2510e806",
"index": 5535,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_weather(request):\n form = WeatherForm()\n error = ''\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (\n latitude, longitude)\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request, template_name='core/weather.html',\n context={'form': form, 'error': error, 'output': output})\n",
"step-3": "from django.http.response import HttpResponse\nfrom django.shortcuts import render, HttpResponse\nimport requests\nfrom django.conf import settings\nfrom .forms import WeatherForm\n\n\ndef get_weather(request):\n form = WeatherForm()\n error = ''\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += 'weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s' % (\n latitude, longitude)\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64;'}\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request, template_name='core/weather.html',\n context={'form': form, 'error': error, 'output': output})\n",
"step-4": "from django.http.response import HttpResponse\nfrom django.shortcuts import render , HttpResponse\nimport requests\nfrom django.conf import settings\nfrom .forms import WeatherForm\n# Create your views here.\n\ndef get_weather(request):\n form = WeatherForm()\n error = \"\"\n output = {}\n if request.method == 'POST':\n form = WeatherForm(request.POST)\n if form.is_valid():\n data = form.cleaned_data\n latitude = data['latitude']\n longitude = data['longitude']\n url = settings.WEATHER_URL\n url += \"weatherapi/locationforecast/2.0/compact?lat=%s&lon=%s\"%(latitude,longitude)\n headers = {\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64;\"}\n response = requests.get(url,headers=headers)\n if response.status_code == 200:\n output = response.json()\n else:\n error = response.text\n return render(request=request,template_name=\"core/weather.html\", context= {'form':form ,\n 'error':error , \"output\":output})",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import requests
import json
import logging
import time
from alto.server.components.datasource import DBInfo, DataSourceAgent
class CRICAgent(DataSourceAgent):
def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):
super().__init__(dbinfo, name, namespace)
self.uri = self.ensure_field(cfg, 'uri')
self.local_asn = cfg.get('local_asn', None)
self.refresh_interval = cfg.get('refresh_interval', None)
self.netroute_map = dict()
logging.info("Loading databases")
self.db = [ self.request_db(t) for t in ['endpoint']]
def update(self):
eb_trans = self.db[0].new_transaction()
cric_dict = dict()
if self.uri.startswith('http'):
data = requests.get(self.uri, verify=False)
cric_dict = json.loads(data.content)
else:
with open(self.uri, 'r') as f_cric:
cric_dict = json.load(f_cric)
for _, rcsite_obj in cric_dict.items():
netroutes = rcsite_obj.get('netroutes', dict())
for _, netroute in netroutes.items():
for _, ipprefixes in netroute['networks'].items():
for ipprefix in ipprefixes:
asn = netroute.get('asn')
if asn == self.local_asn:
eb_trans.add_property(ipprefix, {'is_local': True})
eb_trans.commit()
def run(self):
if self.refresh_interval is None:
self.refresh_interval = 60
while True:
self.update()
time.sleep(self.refresh_interval)
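The document shape `update()` expects can be read off its nested loops: top-level site objects, each holding `netroutes`, each route holding an `asn` and a `networks` mapping of prefix lists. A minimal CRIC-shaped payload with invented values:

# Smallest payload update() can walk; key names come from the code above,
# all values are invented.
cric_dict = {
    'EXAMPLE-RCSITE': {
        'netroutes': {
            'route-1': {
                'asn': 64512,
                'networks': {
                    'ipv4': ['192.0.2.0/24'],
                    'ipv6': ['2001:db8::/32'],
                },
            },
        },
    },
}
# With local_asn = 64512 in the agent config, both prefixes would be
# tagged with {'is_local': True} in the endpoint database.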
|
normal
|
{
"blob_id": "55c00ce4c1657dc5ce78e5eeccd8e9625c0590dc",
"index": 5345,
"step-1": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n <mask token>\n <mask token>\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-2": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n <mask token>\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-3": "<mask token>\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-4": "import requests\nimport json\nimport logging\nimport time\nfrom alto.server.components.datasource import DBInfo, DataSourceAgent\n\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n logging.info('Loading databases')\n self.db = [self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-5": "import requests\nimport json\nimport logging\nimport time\n\nfrom alto.server.components.datasource import DBInfo, DataSourceAgent\n\nclass CRICAgent(DataSourceAgent):\n\n def __init__(self, dbinfo: DBInfo, name: str, namespace='default', **cfg):\n super().__init__(dbinfo, name, namespace)\n\n self.uri = self.ensure_field(cfg, 'uri')\n self.local_asn = cfg.get('local_asn', None)\n self.refresh_interval = cfg.get('refresh_interval', None)\n self.netroute_map = dict()\n\n logging.info(\"Loading databases\")\n self.db = [ self.request_db(t) for t in ['endpoint']]\n\n def update(self):\n eb_trans = self.db[0].new_transaction()\n cric_dict = dict()\n if self.uri.startswith('http'):\n data = requests.get(self.uri, verify=False)\n cric_dict = json.loads(data.content)\n else:\n with open(self.uri, 'r') as f_cric:\n cric_dict = json.load(f_cric)\n\n for _, rcsite_obj in cric_dict.items():\n netroutes = rcsite_obj.get('netroutes', dict())\n for _, netroute in netroutes.items():\n for _, ipprefixes in netroute['networks'].items():\n for ipprefix in ipprefixes:\n asn = netroute.get('asn')\n if asn == self.local_asn:\n eb_trans.add_property(ipprefix, {'is_local': True})\n eb_trans.commit()\n\n def run(self):\n if self.refresh_interval is None:\n self.refresh_interval = 60\n while True:\n self.update()\n time.sleep(self.refresh_interval)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python3
# crits_ldap.py
# This connects to an LDAP server and pulls data about all users.
# Then, it either updates existing targets in CRITS or creates a new entry.
import json
import sys
import datetime
import logging
import logging.config
from configparser import ConfigParser
from ldap3 import Server, Connection, SIMPLE, SYNC, ASYNC, SUBTREE, ALL, ALL_ATTRIBUTES
from pymongo import MongoClient
try:
config = ConfigParser()
config.read('etc/ldap.ini')
except ImportError:
raise SystemExit('ldap.ini was not found or was not accessible.')
try:
logging.config.fileConfig('etc/logging.ini')
log = logging.getLogger("ldap")
except Exception as e:
raise SystemExit("unable to load logging configuration file {0}: {1}"
.format('logging.ini', str(e)))
# load ldap settings from configuration file
ldap_server = config.get('ldap', 'ldap_server')
ldap_bind_user = config.get('ldap', 'ldap_bind_user')
ldap_bind_password = config.get('ldap', 'ldap_bind_password')
ldap_base_dn = config.get('ldap', 'ldap_base_dn')
crits_user = config.get('crits', 'user')
crits_server = config.get('crits', 'server')
TARGET_SCHEMA_VERSION = 3
def ldap_paged_query(query):
response_entries = []
try:
server = Server(ldap_server, port = 389, get_info = ALL)
with Connection(
server,
client_strategy = SYNC,
user=ldap_bind_user,
password=ldap_bind_password,
authentication=SIMPLE,
check_names=True) as c:
log.debug("running ldap query for ({0})".format(query))
c.search(ldap_base_dn, '({0})'.format(query), SUBTREE, attributes =
ALL_ATTRIBUTES, paged_criticality=True, paged_size=100)
            cookie = c.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
# a little hack to move the result into json
response = json.loads(c.response_to_json())
if len(response['entries']) < 1:
return None
for entry in response['entries']:
response_entries.append(entry)
while cookie:
c.search(ldap_base_dn, '({0})'.format(query), SUBTREE,
attributes = ALL_ATTRIBUTES, paged_criticality=True,
paged_size=100, paged_cookie=cookie)
# a little hack to move the result into json
                cookie = c.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
response = json.loads(c.response_to_json())
if len(response['entries']) < 1:
return None
for entry in response['entries']:
response_entries.append(entry)
return response_entries
except Exception as e:
log.warning("unable to perform ldap query: {0}".format(str(e)))
return response_entries
def add_results_to_crits(entries):
"""Adds LDAP data to CRITS targets.
Args:
entries: dict with all the entry data from LDAP
"""
client = MongoClient(crits_server, 27017)
db = client.crits
targets = db.targets
for result in entries:
firstname = ''
lastname = ''
if 'givenName' in result['attributes']:
firstname = result['attributes']['givenName']
if 'sn' in result['attributes']:
lastname = result['attributes']['sn']
department = ''
if 'department' in result['attributes']:
department = result['attributes']['department']
orgid = ''
if 'cn' in result['attributes']:
orgid = result['attributes']['cn']
company = ''
if 'company' in result['attributes']:
company = result['attributes']['company']
title = ''
if 'title' in result['attributes']:
title = result['attributes']['title']
mail = ''
if 'mail' in result['attributes']:
mail = result['attributes']['mail']
tmpmail = str.strip(mail)
if tmpmail == '':
continue
mongo_result = targets.find_one( { 'email_address' : mail.lower() } )
if mongo_result:
log.debug('Found id of {} for the target {}'.format(
mongo_result['_id'], mail))
modified = datetime.datetime.now()
data = {
'firstname' : firstname,
'lastname' : lastname,
'division' : company,
'department' : department,
'organization_id' : orgid,
'title' : title,
'modified' : modified
}
# The user is already in crits, do we need to
# update any information?
update_information = False
for key in data.keys():
if key == 'modified':
continue
if key in mongo_result:
if mongo_result[key] != data[key]:
update_information = True
else:
update_information = True
if update_information:
update_result = targets.update_one( { 'email_address' :
mail.lower() }, { '$set' : data } )
log.info("Records matched: {}, modified: {}, email_address: {}"
.format(update_result.matched_count,
update_result.modified_count, mail.lower()))
else:
# The user is not in CRITS, let's add the information
created = datetime.datetime.now()
data = {
"status" : "New",
"created" : created,
"modified" : created,
"schema_version" : TARGET_SCHEMA_VERSION,
"actions" : [ ],
"tickets" : [ ],
"bucket_list" : [ ],
"campaign" : [ ],
"locations" : [ ],
"objects" : [ ],
"relationships" : [ ],
"releasability" : [ ],
"screenshots" : [ ],
"sectors" : [ ],
"email_address" : mail.lower(),
"email_count" : 0,
'firstname' : firstname,
'lastname' : lastname,
'division' : company,
'department' : department,
'organization_id' : orgid,
'title' : title,
'note' : ''
}
insert_result = targets.insert_one( data )
if insert_result:
log.info("Record inserted: {}".format(
insert_result.inserted_id ))
else:
log.error("Insert failed for {}".format(mail.lower()))
log.info('Beginning LDAP update.')
# Before we do anything, we need to connect to the crits server and make sure
# the schema version is the same for our target collection
client = MongoClient(crits_server, 27017)
db = client.crits
targets = db.targets
tmp_target = targets.find_one()
if 'schema_version' not in tmp_target:
log.error("schema_version not found in target result.")
sys.exit(1)
if tmp_target['schema_version'] != TARGET_SCHEMA_VERSION:
log.error("schema_version has changed (found {}, expected {}). Check "
"CRITS target table.".format(tmp_target['schema_version'],
TARGET_SCHEMA_VERSION))
sys.exit(1)
log.info('Running LDAP query.')
results = ldap_paged_query("mail=*")
if results is not None:
add_results_to_crits(results)
else:
log.info("No results returned from LDAP")
log.info('LDAP update complete.')
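A sketch of the `etc/ldap.ini` the script reads at startup; the section and key names are exactly those passed to `config.get()` above, the values are placeholders.

from configparser import ConfigParser

cfg = ConfigParser()
cfg['ldap'] = {
    'ldap_server': 'ldap.example.com',
    'ldap_bind_user': 'CN=svc-crits,OU=Service Accounts,DC=example,DC=com',
    'ldap_bind_password': 'changeme',
    'ldap_base_dn': 'DC=example,DC=com',
}
cfg['crits'] = {
    'user': 'crits-bot',
    'server': 'crits-db.example.com',
}
with open('etc/ldap.ini', 'w') as f:
    cfg.write(f)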
|
normal
|
{
"blob_id": "5d568c5ac9040ad93749c27bd6fe1a956e7456f7",
"index": 9016,
"step-1": "#!/usr/bin/env python3\n# crits_ldap.py\n# This connects to an LDAP server and pulls data about all users.\n# Then, it either updates existing targets in CRITS or creates a new entry.\n\nimport json\nimport sys\nimport datetime\nimport logging\nimport logging.config\n\nfrom configparser import ConfigParser\nfrom ldap3 import Server, Connection, SIMPLE, SYNC, ASYNC, SUBTREE, ALL,\n ALL_ATTRIBUTES\nfrom pymongo import MongoClient\n\ntry:\n config = ConfigParser()\n config.read('etc/ldap.ini')\nexcept ImportError:\n raise SystemExit('ldap.ini was not found or was not accessible.')\n\ntry:\n logging.config.fileConfig('etc/logging.ini')\n log = logging.getLogger(\"ldap\")\nexcept Exception as e:\n raise SystemExit(\"unable to load logging configuration file {0}: {1}\"\n .format('logging.ini', str(e)))\n\n# load ldap settings from configuration file\nldap_server = config.get('ldap', 'ldap_server')\nldap_bind_user = config.get('ldap', 'ldap_bind_user')\nldap_bind_password = config.get('ldap', 'ldap_bind_password')\nldap_base_dn = config.get('ldap', 'ldap_base_dn')\ncrits_user = config.get('crits', 'user')\ncrits_server = config.get('crits', 'server')\nTARGET_SCHEMA_VERSION = 3\n\ndef ldap_paged_query(query):\n response_entries = []\n\n try:\n server = Server(ldap_server, port = 389, get_info = ALL)\n with Connection(\n server,\n client_strategy = SYNC,\n user=ldap_bind_user,\n password=ldap_bind_password,\n authentication=SIMPLE,\n check_names=True) as c:\n\n log.debug(\"running ldap query for ({0})\".format(query))\n c.search(ldap_base_dn, '({0})'.format(query), SUBTREE, attributes =\n ALL_ATTRIBUTES, paged_criticality=True, paged_size=100)\n\n cookie = c.result['controls']['1.2.840.113556.1.4.319']['value']\n ['cookie']\n # a little hack to move the result into json\n response = json.loads(c.response_to_json())\n\n if len(response['entries']) < 1:\n return None\n\n for entry in response['entries']:\n response_entries.append(entry)\n\n while cookie:\n c.search(ldap_base_dn, '({0})'.format(query), SUBTREE,\n attributes = ALL_ATTRIBUTES, paged_criticality=True,\n paged_size=100, paged_cookie=cookie)\n # a little hack to move the result into json\n cookie = c.result['controls']['1.2.840.113556.1.4.319']\n ['value']['cookie']\n\n response = json.loads(c.response_to_json())\n if len(response['entries']) < 1:\n return None\n\n for entry in response['entries']:\n response_entries.append(entry)\n\n return response_entries\n\n except Exception as e:\n log.warning(\"unable to perform ldap query: {0}\".format(str(e)))\n return response_entries\n\n\ndef add_results_to_crits(entries):\n \"\"\"Adds LDAP data to CRITS targets.\n\n Args:\n entries: dict with all the entry data from LDAP\n \"\"\"\n client = MongoClient(crits_server, 27017)\n db = client.crits\n targets = db.targets\n for result in entries:\n firstname = ''\n lastname = ''\n if 'givenName' in result['attributes']:\n firstname = result['attributes']['givenName']\n if 'sn' in result['attributes']:\n lastname = result['attributes']['sn']\n department = ''\n if 'department' in result['attributes']:\n department = result['attributes']['department']\n orgid = ''\n if 'cn' in result['attributes']:\n orgid = result['attributes']['cn']\n company = ''\n if 'company' in result['attributes']:\n company = result['attributes']['company']\n title = ''\n if 'title' in result['attributes']:\n title = result['attributes']['title']\n mail = ''\n if 'mail' in result['attributes']:\n mail = result['attributes']['mail']\n tmpmail = str.strip(mail)\n if 
tmpmail == '':\n continue\n mongo_result = targets.find_one( { 'email_address' : mail.lower() } )\n if mongo_result:\n log.debug('Found id of {} for the target {}'.format(\n mongo_result['_id'], mail))\n modified = datetime.datetime.now()\n data = {\n 'firstname' : firstname,\n 'lastname' : lastname,\n 'division' : company,\n 'department' : department,\n 'organization_id' : orgid,\n 'title' : title,\n 'modified' : modified\n }\n # The user is already in crits, do we need to\n # update any information?\n update_information = False\n for key in data.keys():\n if key == 'modified':\n continue\n if key in mongo_result:\n if mongo_result[key] != data[key]:\n update_information = True\n else:\n update_information = True\n\n if update_information:\n update_result = targets.update_one( { 'email_address' :\n mail.lower() }, { '$set' : data } )\n log.info(\"Records matched: {}, modified: {}, email_address: {}\"\n .format(update_result.matched_count,\n update_result.modified_count, mail.lower()))\n else:\n # The user is not in CRITS, let's add the information\n created = datetime.datetime.now()\n data = {\n \"status\" : \"New\",\n \"created\" : created,\n \"modified\" : created,\n \"schema_version\" : TARGET_SCHEMA_VERSION,\n \"actions\" : [ ],\n \"tickets\" : [ ],\n \"bucket_list\" : [ ],\n \"campaign\" : [ ],\n \"locations\" : [ ],\n \"objects\" : [ ],\n \"relationships\" : [ ],\n \"releasability\" : [ ],\n \"screenshots\" : [ ],\n \"sectors\" : [ ],\n \"email_address\" : mail.lower(),\n \"email_count\" : 0,\n 'firstname' : firstname,\n 'lastname' : lastname,\n 'division' : company,\n 'department' : department,\n 'organization_id' : orgid,\n 'title' : title,\n 'note' : ''\n }\n insert_result = targets.insert_one( data )\n if insert_result:\n log.info(\"Record inserted: {}\".format(\n insert_result.inserted_id ))\n else:\n log.error(\"Insert failed for {}\".format(mail.lower()))\n\nlog.info('Beginning LDAP update.')\n# Before we do anything, we need to connect to the crits server and make sure\n# the schema version is the same for our target collection\nclient = MongoClient(crits_server, 27017)\ndb = client.crits\ntargets = db.targets\n\ntmp_target = targets.find_one()\nif 'schema_version' not in tmp_target:\n log.error(\"schema_version not found in target result.\")\n sys.exit(1)\nif tmp_target['schema_version'] != TARGET_SCHEMA_VERSION:\n log.error(\"schema_version has changed (found {}, expected {}). Check \"\n \"CRITS target table.\".format(tmp_target['schema_version'],\n TARGET_SCHEMA_VERSION))\n sys.exit(1)\n\nlog.info('Running LDAP query.')\nresults = ldap_paged_query(\"mail=*\")\nif results is not None:\n add_results_to_crits(results)\nelse:\n log.info(\"No results returned from LDAP\")\n\nlog.info('LDAP update complete.')\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
import sys
def get_params(fname):
d = dict()
with open(fname) as f:
for line in f:
l = line.strip()
			if not l or l[0] == '#':
				continue
			param = l.split('=')
v = ' '.join(param[1:])
d[param[0]] = v.strip('\n')
return d
usage_text = "Compares boot configs of two kernels\n" \
"Usage: {0} <filename1> <filename2>".format(sys.argv[0])
try:
f1 = sys.argv[1]
f2 = sys.argv[2]
except:
print usage_text
exit()
params1 = get_params(f1)
params2 = get_params(f2)
param_names = set([key for key in params1]) | set([key for key in params2])
the_first = True
f_output = "{0:80}{1:40}{2:40}"
for param in param_names:
try:
val1 = params1[param]
except KeyError:
val1 = '-'
try:
val2 = params2[param]
except KeyError:
val2 = '-'
if (val1 != val2):
if the_first:
print(f_output.format("Param name", f1, f2))
print "-"*140
the_first = False
print (f_output.format(param, val1, val2))
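The inputs are kernel `.config`-style files: `#` comment lines are skipped and everything else is split on `=`, with the tail rejoined by spaces. A quick illustration of what `get_params` builds (note the script itself is Python 2, per its `print` statements):

# What get_params() produces for a .config-style snippet (illustrative).
sample = ['# Automatically generated\n', 'CONFIG_SMP=y\n', 'CONFIG_HZ=250\n']
d = {}
for line in sample:
    l = line.strip()
    if not l or l[0] == '#':
        continue
    param = l.split('=')
    d[param[0]] = ' '.join(param[1:])
print(d)  # {'CONFIG_SMP': 'y', 'CONFIG_HZ': '250'}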
|
normal
|
{
"blob_id": "d287a5128ca9352b2edc459c9e42a57ef800ec9c",
"index": 7657,
"step-1": "#!/usr/bin/python\n\nimport sys\n\ndef get_params(fname):\n\td = dict()\n\twith open(fname) as f:\n\t\tfor line in f:\n\t\t\tl = line.strip()\n\t\t\tif (line[0] == '#'):\n\t\t\t\tcontinue\n\t\t\tparam = line.split('=')\n\t\t\tv = ' '.join(param[1:])\n\t\t\td[param[0]] = v.strip('\\n') \n\treturn d\n\nusage_text = \"Compares boot configs of two kernels\\n\" \\\n\t\"Usage: {0} <filename1> <filename2>\".format(sys.argv[0])\ntry:\n\tf1 = sys.argv[1]\n\tf2 = sys.argv[2]\nexcept:\n\tprint usage_text\n\texit()\n\nparams1 = get_params(f1)\nparams2 = get_params(f2)\n\nparam_names = set([key for key in params1]) | set([key for key in params2])\n\n\nthe_first = True\nf_output = \"{0:80}{1:40}{2:40}\"\n\nfor param in param_names:\n\ttry:\n\t\tval1 = params1[param]\n\texcept KeyError:\n\t\tval1 = '-'\n\t\t\n\ttry:\n\t\tval2 = params2[param]\n\texcept KeyError:\n\t\tval2 = '-'\n\n\tif (val1 != val2):\n\t\tif the_first:\n\t\t\tprint(f_output.format(\"Param name\", f1, f2))\n\t\t\tprint \"-\"*140\n\t\t\tthe_first = False\n\n\t\tprint (f_output.format(param, val1, val2))\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Client_OrderInline(admin.TabularInline):
<|reserved_special_token_0|>
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = 'admin/index.html'
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('get_image_html', 'id', 'section_id', 'title',
'getSize', 'getPrice')
list_displat_links = ''
inlines = [ProductCompositionInline]
search_fields = ['title']
list_filter = ['section_id', PriceListFilter]
def get_image_html(self, obj):
return format_html(
'<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>'
, obj.image.url)
get_image_html.short_description = 'Фото товара'
def getPrice(self, obj):
try:
            object = Stock.objects.get(product_id=obj.id, status=True)
return format_html('<del>{} грн.</del> <span>{} грн. </span>'.
format(obj.price, object.value))
except:
pass
return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')
getPrice.short_description = 'Цена'
def getSize(self, obj):
return str(obj.size) + obj.unitSize
getSize.short_description = 'Вес'
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = 'id', 'dateTimeOrder', 'price', 'status'
list_filter = ['dateTimeOrder', PriceListFilter, 'status']
list_editable = ['status']
inlines = [OrderInline]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = 'id', 'name', 'phone_number'
inlines = [Client_OrderInline]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = 'id', 'title'
class StockAdmin(admin.ModelAdmin):
list_display = 'product_id', 'value', 'status'
search_fields = ['product_id__title']
list_filter = ['status']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Client_OrderInline(admin.TabularInline):
model = Order
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = 'admin/index.html'
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('get_image_html', 'id', 'section_id', 'title',
'getSize', 'getPrice')
list_displat_links = ''
inlines = [ProductCompositionInline]
search_fields = ['title']
list_filter = ['section_id', PriceListFilter]
def get_image_html(self, obj):
return format_html(
'<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>'
, obj.image.url)
get_image_html.short_description = 'Фото товара'
def getPrice(self, obj):
try:
            object = Stock.objects.get(product_id=obj.id, status=True)
return format_html('<del>{} грн.</del> <span>{} грн. </span>'.
format(obj.price, object.value))
except:
pass
return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')
getPrice.short_description = 'Цена'
def getSize(self, obj):
return str(obj.size) + obj.unitSize
getSize.short_description = 'Вес'
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = 'id', 'dateTimeOrder', 'price', 'status'
list_filter = ['dateTimeOrder', PriceListFilter, 'status']
list_editable = ['status']
inlines = [OrderInline]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = 'id', 'name', 'phone_number'
inlines = [Client_OrderInline]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = 'id', 'title'
class StockAdmin(admin.ModelAdmin):
list_display = 'product_id', 'value', 'status'
search_fields = ['product_id__title']
list_filter = ['status']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrderInline(admin.TabularInline):
<|reserved_special_token_0|>
class Client_OrderInline(admin.TabularInline):
model = Order
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = 'admin/index.html'
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('get_image_html', 'id', 'section_id', 'title',
'getSize', 'getPrice')
list_displat_links = ''
inlines = [ProductCompositionInline]
search_fields = ['title']
list_filter = ['section_id', PriceListFilter]
def get_image_html(self, obj):
return format_html(
'<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>'
, obj.image.url)
get_image_html.short_description = 'Фото товара'
def getPrice(self, obj):
try:
            object = Stock.objects.get(product_id=obj.id, status=True)
return format_html('<del>{} грн.</del> <span>{} грн. </span>'.
format(obj.price, object.value))
except:
pass
return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')
getPrice.short_description = 'Цена'
def getSize(self, obj):
return str(obj.size) + obj.unitSize
getSize.short_description = 'Вес'
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = 'id', 'dateTimeOrder', 'price', 'status'
list_filter = ['dateTimeOrder', PriceListFilter, 'status']
list_editable = ['status']
inlines = [OrderInline]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = 'id', 'name', 'phone_number'
inlines = [Client_OrderInline]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = 'id', 'title'
class StockAdmin(admin.ModelAdmin):
list_display = 'product_id', 'value', 'status'
search_fields = ['product_id__title']
list_filter = ['status']
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from obp.models import *
from django.utils.html import format_html
from jet.admin import CompactInline
from jet.dashboard import modules
from jet.dashboard.dashboard import Dashboard, AppIndexDashboard
class ProductCompositionInline(admin.TabularInline):
model = Product_composition
class OrderInline(admin.TabularInline):
model = Product_order
class Client_OrderInline(admin.TabularInline):
model = Order
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = 'admin/index.html'
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('get_image_html', 'id', 'section_id', 'title',
'getSize', 'getPrice')
list_displat_links = ''
inlines = [ProductCompositionInline]
search_fields = ['title']
list_filter = ['section_id', PriceListFilter]
def get_image_html(self, obj):
return format_html(
'<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>'
, obj.image.url)
get_image_html.short_description = 'Фото товара'
def getPrice(self, obj):
try:
            object = Stock.objects.get(product_id=obj.id, status=True)
return format_html('<del>{} грн.</del> <span>{} грн. </span>'.
format(obj.price, object.value))
except:
pass
return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')
getPrice.short_description = 'Цена'
def getSize(self, obj):
return str(obj.size) + obj.unitSize
getSize.short_description = 'Вес'
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (
'4', 'от 450')
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte=0, price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte=200, price__lte=299)
if self.value() == '3':
return queryset.filter(price__gte=300, price__lte=449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = 'id', 'dateTimeOrder', 'price', 'status'
list_filter = ['dateTimeOrder', PriceListFilter, 'status']
list_editable = ['status']
inlines = [OrderInline]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = 'id', 'name', 'phone_number'
inlines = [Client_OrderInline]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = 'id', 'title'
class StockAdmin(admin.ModelAdmin):
list_display = 'product_id', 'value', 'status'
search_fields = ['product_id__title']
list_filter = ['status']
admin_site = MyAdminSite(name='myadmin')
admin_site.register(Product, ProductAdmin)
admin_site.register(Client, ClientAdmin)
admin_site.register(Order, OrderAdmin)
admin_site.register(Section, SectionAdmin)
admin_site.register(Stock, StockAdmin)
admin_site.register(Product_comment)
admin_site.register(Client_Auth)
admin_site.register(Special_offers)
admin_site.register(Product_rating)
<|reserved_special_token_1|>
from django.contrib import admin
from django.contrib.admin.sites import AdminSite
from obp.models import *
from django.utils.html import format_html
from jet.admin import CompactInline
#from django.utils.translation import ugettext_lazy as _
from jet.dashboard import modules
from jet.dashboard.dashboard import Dashboard, AppIndexDashboard
# Register your models here.
#admin.site.register(Special_offers)
#admin.site.register(Stock)
#admin.site.register(Product_order)
class ProductCompositionInline(admin.TabularInline):
model = Product_composition
class OrderInline(admin.TabularInline):
model = Product_order
class Client_OrderInline(admin.TabularInline):
model = Order
class MyAdminSite(AdminSite):
site_header = 'Pizza-Day'
index_template = "admin/index.html"
@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
#Создание ценового фильтра
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
#Название фильтров
def lookups(self, request, model_admin):
return (
('1', 'до 199'),
('2', '200 - 299'),
('3', '300 - 449'),
('4', 'от 450'),
)
#Запрос на выборку
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte= 0,
price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte = 200,
price__lte = 299)
if self.value() == '3':
return queryset.filter(price__gte = 300,
price__lte = 449)
if self.value() == '4':
                return queryset.filter(price__gte = 450)
#Отображаемые поля
list_display = ('get_image_html', 'id', 'section_id', 'title', 'getSize', 'getPrice' )
list_displat_links = ('')
inlines = [
ProductCompositionInline
]
#Поле по которому можно сделать поиск
search_fields = ['title']
#Список фильтраций
list_filter = ['section_id', PriceListFilter]
#Получение html блока с рисунком товара
def get_image_html(self, obj):
return format_html('<img src = "{}" style = "height: 30px; border-radius: 5px;"></img>', obj.image.url)
get_image_html.short_description = "Фото товара"
#Получение цены
def getPrice(self, obj):
try:
            object = Stock.objects.get( product_id = obj.id , status = True)
return format_html("<del>{} грн.</del> <span>{} грн. </span>".format(obj.price, object.value) )
except:
pass
#else:
return format_html("<span>" + str( obj.price ) + " грн." + "</span>")
getPrice.short_description = "Цена"
#Получение строки веса + его еденицу измерения
def getSize(self, obj):
return str( obj.size ) + obj.unitSize
getSize.short_description = "Вес"
@admin.register(Order)
class OrderAdmin(admin.ModelAdmin):
class PriceListFilter(admin.SimpleListFilter):
title = 'Цена'
parameter_name = 'цена'
def lookups(self, request, model_admin):
return (
('1', 'до 199'),
('2', '200 - 299'),
('3', '300 - 449'),
('4', 'от 450'),
)
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(price__gte= 0,
price__lte=199)
if self.value() == '2':
return queryset.filter(price__gte = 200,
price__lte = 299)
if self.value() == '3':
return queryset.filter(price__gte = 300,
price__lte = 449)
if self.value() == '4':
                return queryset.filter(price__gte=450)
list_display = ('id', 'dateTimeOrder', 'price', 'status' )
list_filter = ['dateTimeOrder', PriceListFilter, "status"]
list_editable = ["status"]
inlines = [
OrderInline
]
@admin.register(Client)
class ClientAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'phone_number' )
inlines = [
Client_OrderInline
]
@admin.register(Section)
class SectionAdmin(admin.ModelAdmin):
list_display = ('id', 'title')
class StockAdmin(admin.ModelAdmin):
list_display = ("product_id", "value", "status" )
search_fields = ['product_id__title']
list_filter = ['status']
admin_site = MyAdminSite(name='myadmin')
admin_site.register(Product, ProductAdmin)
admin_site.register(Client, ClientAdmin)
admin_site.register(Order, OrderAdmin)
admin_site.register(Section, SectionAdmin)
admin_site.register(Stock, StockAdmin)
admin_site.register(Product_comment)
admin_site.register(Client_Auth)
admin_site.register(Special_offers)
admin_site.register(Product_rating)
#
#
# class CustomIndexDashboard(Dashboard):
# columns = 3
#
# def init_with_context(self, context):
# self.available_children.append(modules.LinkList)
# self.children.append(modules.LinkList(
# _('Support'),
# children=[
# {
# 'title': _('Django documentation'),
# 'url': 'http://docs.djangoproject.com/',
# 'external': True,
# },
# {
# 'title': _('Django "django-users" mailing list'),
# 'url': 'http://groups.google.com/group/django-users',
# 'external': True,
# },
# {
# 'title': _('Django irc channel'),
# 'url': 'irc://irc.freenode.net/django',
# 'external': True,
# },
# ],
# column=0,
# order=0
# ))
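Because every model is registered on the custom `MyAdminSite` instance rather than the default `admin.site`, the project URLconf has to route to it; a sketch, assuming this module lives at `obp/admin.py`:

# project urls.py - point the admin URL at the custom site (module path assumed)
from django.urls import path
from obp.admin import admin_site

urlpatterns = [
    path('admin/', admin_site.urls),
]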
|
flexible
|
{
"blob_id": "d301ffa790d6444519e354a2b6f8d65f67d380c0",
"index": 1739,
"step-1": "<mask token>\n\n\nclass Client_OrderInline(admin.TabularInline):\n <mask token>\n\n\nclass MyAdminSite(AdminSite):\n site_header = 'Pizza-Day'\n index_template = 'admin/index.html'\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n return go()\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = ('get_image_html', 'id', 'section_id', 'title',\n 'getSize', 'getPrice')\n list_displat_links = ''\n inlines = [ProductCompositionInline]\n search_fields = ['title']\n list_filter = ['section_id', PriceListFilter]\n\n def get_image_html(self, obj):\n return format_html(\n '<img src = \"{}\" style = \"height: 30px; border-radius: 5px;\"></img>'\n , obj.image.url)\n get_image_html.short_description = 'Фото товара'\n\n def getPrice(self, obj):\n try:\n object = Stock.objects.get(id=obj.id, status=True)\n return format_html('<del>{} грн.</del> <span>{} грн. </span>'.\n format(obj.price, object.value))\n except:\n pass\n return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')\n getPrice.short_description = 'Цена'\n\n def getSize(self, obj):\n return str(obj.size) + obj.unitSize\n getSize.short_description = 'Вес'\n\n\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = 'id', 'dateTimeOrder', 'price', 'status'\n list_filter = ['dateTimeOrder', PriceListFilter, 'status']\n list_editable = ['status']\n inlines = [OrderInline]\n\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = 'id', 'name', 'phone_number'\n inlines = [Client_OrderInline]\n\n\[email protected](Section)\nclass SectionAdmin(admin.ModelAdmin):\n list_display = 'id', 'title'\n\n\nclass StockAdmin(admin.ModelAdmin):\n list_display = 'product_id', 'value', 'status'\n search_fields = ['product_id__title']\n list_filter = ['status']\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Client_OrderInline(admin.TabularInline):\n model = Order\n\n\nclass MyAdminSite(AdminSite):\n site_header = 'Pizza-Day'\n index_template = 'admin/index.html'\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n return go()\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = ('get_image_html', 'id', 'section_id', 'title',\n 'getSize', 'getPrice')\n list_displat_links = ''\n inlines = [ProductCompositionInline]\n search_fields = ['title']\n list_filter = ['section_id', PriceListFilter]\n\n def get_image_html(self, obj):\n return format_html(\n '<img src = \"{}\" style = \"height: 30px; border-radius: 5px;\"></img>'\n , obj.image.url)\n get_image_html.short_description = 'Фото товара'\n\n def getPrice(self, obj):\n try:\n object = Stock.objects.get(id=obj.id, status=True)\n return format_html('<del>{} грн.</del> <span>{} грн. </span>'.\n format(obj.price, object.value))\n except:\n pass\n return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')\n getPrice.short_description = 'Цена'\n\n def getSize(self, obj):\n return str(obj.size) + obj.unitSize\n getSize.short_description = 'Вес'\n\n\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = 'id', 'dateTimeOrder', 'price', 'status'\n list_filter = ['dateTimeOrder', PriceListFilter, 'status']\n list_editable = ['status']\n inlines = [OrderInline]\n\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = 'id', 'name', 'phone_number'\n inlines = [Client_OrderInline]\n\n\[email protected](Section)\nclass SectionAdmin(admin.ModelAdmin):\n list_display = 'id', 'title'\n\n\nclass StockAdmin(admin.ModelAdmin):\n list_display = 'product_id', 'value', 'status'\n search_fields = ['product_id__title']\n list_filter = ['status']\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OrderInline(admin.TabularInline):\n <mask token>\n\n\nclass Client_OrderInline(admin.TabularInline):\n model = Order\n\n\nclass MyAdminSite(AdminSite):\n site_header = 'Pizza-Day'\n index_template = 'admin/index.html'\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n return go()\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = ('get_image_html', 'id', 'section_id', 'title',\n 'getSize', 'getPrice')\n list_displat_links = ''\n inlines = [ProductCompositionInline]\n search_fields = ['title']\n list_filter = ['section_id', PriceListFilter]\n\n def get_image_html(self, obj):\n return format_html(\n '<img src = \"{}\" style = \"height: 30px; border-radius: 5px;\"></img>'\n , obj.image.url)\n get_image_html.short_description = 'Фото товара'\n\n def getPrice(self, obj):\n try:\n object = Stock.objects.get(id=obj.id, status=True)\n return format_html('<del>{} грн.</del> <span>{} грн. </span>'.\n format(obj.price, object.value))\n except:\n pass\n return format_html('<span>' + str(obj.price) + ' грн.' + '</span>')\n getPrice.short_description = 'Цена'\n\n def getSize(self, obj):\n return str(obj.size) + obj.unitSize\n getSize.short_description = 'Вес'\n\n\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = 'id', 'dateTimeOrder', 'price', 'status'\n list_filter = ['dateTimeOrder', PriceListFilter, 'status']\n list_editable = ['status']\n inlines = [OrderInline]\n\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = 'id', 'name', 'phone_number'\n inlines = [Client_OrderInline]\n\n\[email protected](Section)\nclass SectionAdmin(admin.ModelAdmin):\n list_display = 'id', 'title'\n\n\nclass StockAdmin(admin.ModelAdmin):\n list_display = 'product_id', 'value', 'status'\n search_fields = ['product_id__title']\n list_filter = ['status']\n\n\n<mask token>\n",
"step-4": "from django.contrib import admin\nfrom django.contrib.admin.sites import AdminSite\nfrom obp.models import *\nfrom django.utils.html import format_html\nfrom jet.admin import CompactInline\nfrom jet.dashboard import modules\nfrom jet.dashboard.dashboard import Dashboard, AppIndexDashboard\n\n\nclass ProductCompositionInline(admin.TabularInline):\n model = Product_composition\n\n\nclass OrderInline(admin.TabularInline):\n model = Product_order\n\n\nclass Client_OrderInline(admin.TabularInline):\n model = Order\n\n\nclass MyAdminSite(AdminSite):\n site_header = 'Pizza-Day'\n index_template = 'admin/index.html'\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n return go()\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = ('get_image_html', 'id', 'section_id', 'title',\n 'getSize', 'getPrice')\n list_displat_links = ''\n inlines = [ProductCompositionInline]\n search_fields = ['title']\n list_filter = ['section_id', PriceListFilter]\n\n def get_image_html(self, obj):\n return format_html(\n '<img src = \"{}\" style = \"height: 30px; border-radius: 5px;\"></img>'\n , obj.image.url)\n get_image_html.short_description = 'Фото товара'\n\n def getPrice(self, obj):\n try:\n object = Stock.objects.get(id=obj.id, status=True)\n return format_html('<del>{} грн.</del> <span>{} грн. </span>'.\n format(obj.price, object.value))\n except:\n pass\n return format_html('<span>' + str(obj.price) + ' грн.' 
+ '</span>')\n getPrice.short_description = 'Цена'\n\n def getSize(self, obj):\n return str(obj.size) + obj.unitSize\n getSize.short_description = 'Вес'\n\n\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n\n\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n\n def lookups(self, request, model_admin):\n return ('1', 'до 199'), ('2', '200 - 299'), ('3', '300 - 449'), (\n '4', 'от 450')\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte=0, price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte=200, price__lte=299)\n if self.value() == '3':\n return queryset.filter(price__gte=300, price__lte=449)\n if self.value() == '4':\n return queryset.filter(price__gte=200, price__lte=299)\n list_display = 'id', 'dateTimeOrder', 'price', 'status'\n list_filter = ['dateTimeOrder', PriceListFilter, 'status']\n list_editable = ['status']\n inlines = [OrderInline]\n\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = 'id', 'name', 'phone_number'\n inlines = [Client_OrderInline]\n\n\[email protected](Section)\nclass SectionAdmin(admin.ModelAdmin):\n list_display = 'id', 'title'\n\n\nclass StockAdmin(admin.ModelAdmin):\n list_display = 'product_id', 'value', 'status'\n search_fields = ['product_id__title']\n list_filter = ['status']\n\n\nadmin_site = MyAdminSite(name='myadmin')\nadmin_site.register(Product, ProductAdmin)\nadmin_site.register(Client, ClientAdmin)\nadmin_site.register(Order, OrderAdmin)\nadmin_site.register(Section, SectionAdmin)\nadmin_site.register(Stock, StockAdmin)\nadmin_site.register(Product_comment)\nadmin_site.register(Client_Auth)\nadmin_site.register(Special_offers)\nadmin_site.register(Product_rating)\n",
"step-5": "from django.contrib import admin\nfrom django.contrib.admin.sites import AdminSite\nfrom obp.models import *\nfrom django.utils.html import format_html\nfrom jet.admin import CompactInline\n#from django.utils.translation import ugettext_lazy as _\nfrom jet.dashboard import modules\nfrom jet.dashboard.dashboard import Dashboard, AppIndexDashboard\n# Register your models here.\n\n#admin.site.register(Special_offers)\n#admin.site.register(Stock)\n#admin.site.register(Product_order)\nclass ProductCompositionInline(admin.TabularInline):\n model = Product_composition\n\nclass OrderInline(admin.TabularInline):\n model = Product_order\n\nclass Client_OrderInline(admin.TabularInline):\n model = Order\n\nclass MyAdminSite(AdminSite):\n site_header = 'Pizza-Day'\n index_template = \"admin/index.html\"\n\n\[email protected](Product)\nclass ProductAdmin(admin.ModelAdmin):\n #Создание ценового фильтра\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n #Название фильтров\n def lookups(self, request, model_admin):\n return (\n ('1', 'до 199'),\n ('2', '200 - 299'),\n ('3', '300 - 449'),\n ('4', 'от 450'),\n )\n #Запрос на выборку\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte= 0,\n price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte = 200,\n price__lte = 299)\n return go()\n if self.value() == '3':\n return queryset.filter(price__gte = 300,\n price__lte = 449)\n if self.value() == '4':\n return queryset.filter(price__gte=200,\n price__lte=299)\n #Отображаемые поля\n list_display = ('get_image_html', 'id', 'section_id', 'title', 'getSize', 'getPrice' )\n\n list_displat_links = ('')\n\n inlines = [\n ProductCompositionInline\n ]\n #Поле по которому можно сделать поиск\n search_fields = ['title']\n #Список фильтраций\n list_filter = ['section_id', PriceListFilter]\n\n #Получение html блока с рисунком товара\n def get_image_html(self, obj):\n return format_html('<img src = \"{}\" style = \"height: 30px; border-radius: 5px;\"></img>', obj.image.url)\n get_image_html.short_description = \"Фото товара\"\n\n #Получение цены\n def getPrice(self, obj):\n\n try:\n object = Stock.objects.get( id = obj.id , status = True)\n return format_html(\"<del>{} грн.</del> <span>{} грн. 
</span>\".format(obj.price, object.value) )\n except:\n pass\n #else:\n return format_html(\"<span>\" + str( obj.price ) + \" грн.\" + \"</span>\")\n getPrice.short_description = \"Цена\"\n\n #Получение строки веса + его еденицу измерения\n def getSize(self, obj):\n return str( obj.size ) + obj.unitSize\n getSize.short_description = \"Вес\"\n\n\[email protected](Order)\nclass OrderAdmin(admin.ModelAdmin):\n class PriceListFilter(admin.SimpleListFilter):\n title = 'Цена'\n parameter_name = 'цена'\n def lookups(self, request, model_admin):\n return (\n ('1', 'до 199'),\n ('2', '200 - 299'),\n ('3', '300 - 449'),\n ('4', 'от 450'),\n )\n\n def queryset(self, request, queryset):\n if self.value() == '1':\n return queryset.filter(price__gte= 0,\n price__lte=199)\n if self.value() == '2':\n return queryset.filter(price__gte = 200,\n price__lte = 299)\n if self.value() == '3':\n return queryset.filter(price__gte = 300,\n price__lte = 449)\n if self.value() == '4':\n return queryset.filter(price__gte=200,\n price__lte=299)\n\n list_display = ('id', 'dateTimeOrder', 'price', 'status' )\n list_filter = ['dateTimeOrder', PriceListFilter, \"status\"]\n list_editable = [\"status\"]\n\n inlines = [\n OrderInline\n ]\n\[email protected](Client)\nclass ClientAdmin(admin.ModelAdmin):\n list_display = ('id', 'name', 'phone_number' )\n inlines = [\n Client_OrderInline\n ]\n\[email protected](Section)\nclass SectionAdmin(admin.ModelAdmin):\n list_display = ('id', 'title')\n\n\nclass StockAdmin(admin.ModelAdmin):\n list_display = (\"product_id\", \"value\", \"status\" )\n search_fields = ['product_id__title']\n list_filter = ['status']\n\nadmin_site = MyAdminSite(name='myadmin')\nadmin_site.register(Product, ProductAdmin)\nadmin_site.register(Client, ClientAdmin)\nadmin_site.register(Order, OrderAdmin)\nadmin_site.register(Section, SectionAdmin)\nadmin_site.register(Stock, StockAdmin)\nadmin_site.register(Product_comment)\nadmin_site.register(Client_Auth)\nadmin_site.register(Special_offers)\nadmin_site.register(Product_rating)\n\n\n\n# \n#\n# class CustomIndexDashboard(Dashboard):\n# columns = 3\n#\n# def init_with_context(self, context):\n# self.available_children.append(modules.LinkList)\n# self.children.append(modules.LinkList(\n# _('Support'),\n# children=[\n# {\n# 'title': _('Django documentation'),\n# 'url': 'http://docs.djangoproject.com/',\n# 'external': True,\n# },\n# {\n# 'title': _('Django \"django-users\" mailing list'),\n# 'url': 'http://groups.google.com/group/django-users',\n# 'external': True,\n# },\n# {\n# 'title': _('Django irc channel'),\n# 'url': 'irc://irc.freenode.net/django',\n# 'external': True,\n# },\n# ],\n# column=0,\n# order=0\n# ))\n",
"step-ids": [
16,
17,
18,
24,
25
]
}
|
[
16,
17,
18,
24,
25
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for word in stdin:
lst_in = word
match = re.finditer('(\\w)\\1+', lst_in)
for item in match:
lst_in = lst_in.replace(item[0], item[0][0])
print(lst_in, end='')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from sys import stdin
import re
for word in stdin:
lst_in = word
match = re.finditer('(\\w)\\1+', lst_in)
for item in match:
lst_in = lst_in.replace(item[0], item[0][0])
print(lst_in, end='')
<|reserved_special_token_1|>
"""
You are given a sequence of strings.
In each string, replace every run of repeated identical letters with a single letter.
A letter is any character from the \w class.
Sample Input:
attraction
buzzzz
Sample Output:
atraction
buz
"""
from sys import stdin
import re
for word in stdin:
lst_in = word
match = re.finditer(r'(\w)\1+', lst_in)
for item in match:
lst_in = lst_in.replace(item[0], item[0][0])
print(lst_in, end='')
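# Editor's note (a sketch, not part of the archived solution): the stated task
# is usually solved with one backreference substitution, e.g.
#     print(re.sub(r'(\w)\1+', r'\1', word), end='')
# The finditer/str.replace loop above can differ on inputs where one run of a
# letter is a substring of a longer run elsewhere (replace rewrites every
# occurrence of the matched run at once), e.g. 'aaxaaa' -> 'axaa' vs 'axa'.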
|
flexible
|
{
"blob_id": "5b7c04f23fb674191639e95dff8c530933379d67",
"index": 3686,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor word in stdin:\n lst_in = word\n match = re.finditer('(\\\\w)\\\\1+', lst_in)\n for item in match:\n lst_in = lst_in.replace(item[0], item[0][0])\n print(lst_in, end='')\n",
"step-3": "<mask token>\nfrom sys import stdin\nimport re\nfor word in stdin:\n lst_in = word\n match = re.finditer('(\\\\w)\\\\1+', lst_in)\n for item in match:\n lst_in = lst_in.replace(item[0], item[0][0])\n print(lst_in, end='')\n",
"step-4": "\"\"\"\nВам дана последовательность строк.\nВ каждой строке замените все вхождения нескольких одинаковых букв на одну букву.\nБуквой считается символ из группы \\w.\nSample Input:\n\nattraction\nbuzzzz\nSample Output:\n\natraction\nbuz\n\"\"\"\nfrom sys import stdin\nimport re\n\nfor word in stdin:\n lst_in = word\n match = re.finditer(r'(\\w)\\1+', lst_in)\n for item in match:\n lst_in = lst_in.replace(item[0], item[0][0])\n print(lst_in, end='')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'
), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
into a (n+1)-d array, only allow the first dim has variable lengths.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if 'torch' in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in
sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:]
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert 'torch' in str(dtype), 'dtype and input type does not match'
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.
float32, device=device)
else:
assert 'numpy' in str(dtype), 'dtype and input type does not match'
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask
<|reserved_special_token_0|>
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5,
prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d,
axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'
), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
into a (n+1)-d array, only allow the first dim has variable lengths.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if 'torch' in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in
sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:]
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert 'torch' in str(dtype), 'dtype and input type does not match'
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.
float32, device=device)
else:
assert 'numpy' in str(dtype), 'dtype and input type does not match'
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask
def pad_sequences_2d(sequences, dtype=torch.long):
""" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
only allow the first two dims has variable lengths
Args:
sequences: list(n-d tensor or list)
dtype: torch.long for word indices / torch.float (float32) for other cases
Returns:
Examples:
>>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
>>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
>>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
>>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])
>>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
>>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])
# TODO add support for numpy array
"""
bsz = len(sequences)
para_lengths = [len(seq) for seq in sequences]
max_para_len = max(para_lengths)
sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
max_sen_len = max([max(e) for e in sen_lengths])
if isinstance(sequences[0], torch.Tensor):
extra_dims = sequences[0].shape[2:]
elif isinstance(sequences[0][0], torch.Tensor):
extra_dims = sequences[0][0].shape[1:]
else:
sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in
seq] for seq in sequences]
extra_dims = ()
padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,
dtype=dtype)
mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
for b_i in range(bsz):
for sen_i, sen_l in enumerate(sen_lengths[b_i]):
padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
mask[b_i, sen_i, :sen_l] = 1
return padded_seqs, mask
<|reserved_special_token_0|>
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5,
prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d,
axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'
), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
into a (n+1)-d array, only allow the first dim has variable lengths.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if 'torch' in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in
sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:]
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert 'torch' in str(dtype), 'dtype and input type does not match'
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.
float32, device=device)
else:
assert 'numpy' in str(dtype), 'dtype and input type does not match'
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask
def pad_sequences_2d(sequences, dtype=torch.long):
""" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
only allow the first two dims has variable lengths
Args:
sequences: list(n-d tensor or list)
dtype: torch.long for word indices / torch.float (float32) for other cases
Returns:
Examples:
>>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
>>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
>>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
>>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])
>>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
>>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])
# TODO add support for numpy array
"""
bsz = len(sequences)
para_lengths = [len(seq) for seq in sequences]
max_para_len = max(para_lengths)
sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
max_sen_len = max([max(e) for e in sen_lengths])
if isinstance(sequences[0], torch.Tensor):
extra_dims = sequences[0].shape[2:]
elif isinstance(sequences[0][0], torch.Tensor):
extra_dims = sequences[0][0].shape[1:]
else:
sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in
seq] for seq in sequences]
extra_dims = ()
padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,
dtype=dtype)
mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
for b_i in range(bsz):
for sen_i, sen_l in enumerate(sen_lengths[b_i]):
padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
mask[b_i, sen_i, :sen_l] = 1
return padded_seqs, mask
def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=
'torch'):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]
Args:
st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities
ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities
top_n (int): return topN pairs with highest values
prob_thd (float):
tensor_type: str, np or torch
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
if tensor_type == 'torch':
st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()
product = np.einsum('bm,bn->bmn', st_prob, ed_prob)
upper_product = np.triu(product, k=1)
return find_max_triples_from_upper_triangle_product(upper_product,
top_n=top_n, prob_thd=prob_thd)
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5,
prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d,
axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1)
<|reserved_special_token_1|>
import numpy as np
import torch
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'
), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
into a (n+1)-d array, only allow the first dim has variable lengths.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if 'torch' in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in
sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:]
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert 'torch' in str(dtype), 'dtype and input type does not match'
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.
float32, device=device)
else:
assert 'numpy' in str(dtype), 'dtype and input type does not match'
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,
dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask
def pad_sequences_2d(sequences, dtype=torch.long):
""" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
only allow the first two dims has variable lengths
Args:
sequences: list(n-d tensor or list)
dtype: torch.long for word indices / torch.float (float32) for other cases
Returns:
Examples:
>>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
>>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
>>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
>>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])
>>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
>>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])
# TODO add support for numpy array
"""
bsz = len(sequences)
para_lengths = [len(seq) for seq in sequences]
max_para_len = max(para_lengths)
sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
max_sen_len = max([max(e) for e in sen_lengths])
if isinstance(sequences[0], torch.Tensor):
extra_dims = sequences[0].shape[2:]
elif isinstance(sequences[0][0], torch.Tensor):
extra_dims = sequences[0][0].shape[1:]
else:
sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in
seq] for seq in sequences]
extra_dims = ()
padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,
dtype=dtype)
mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
for b_i in range(bsz):
for sen_i, sen_l in enumerate(sen_lengths[b_i]):
padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
mask[b_i, sen_i, :sen_l] = 1
return padded_seqs, mask
def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=
'torch'):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]
Args:
st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities
ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities
top_n (int): return topN pairs with highest values
prob_thd (float):
tensor_type: str, np or torch
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
if tensor_type == 'torch':
st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()
product = np.einsum('bm,bn->bmn', st_prob, ed_prob)
upper_product = np.triu(product, k=1)
return find_max_triples_from_upper_triangle_product(upper_product,
top_n=top_n, prob_thd=prob_thd)
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5,
prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d,
axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1)
<|reserved_special_token_1|>
import numpy as np
import torch
def pad_sequences_1d(sequences, dtype=torch.long, device=torch.device("cpu"), fixed_length=None):
""" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)
into a (n+1)-d array, only allow the first dim has variable lengths.
Args:
sequences: list(n-d tensor or list)
dtype: np.dtype or torch.dtype
device:
fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.
return will be of shape [len(sequences), fixed_length, ...]
Returns:
padded_seqs: ((n+1)-d tensor) padded with zeros
mask: (2d tensor) of the same shape as the first two dims of padded_seqs,
1 indicate valid, 0 otherwise
Examples:
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=torch.long)
>>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=torch.float)
>>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]
>>> pad_sequences_1d(test_data_list, dtype=np.float32)
>>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]
>>> pad_sequences_1d(test_data_3d, dtype=np.float32)
"""
if isinstance(sequences[0], list):
if "torch" in str(dtype):
sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]
else:
sequences = [np.asarray(s, dtype=dtype) for s in sequences]
extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements
lengths = [len(seq) for seq in sequences]
if fixed_length is not None:
max_length = fixed_length
else:
max_length = max(lengths)
if isinstance(sequences[0], torch.Tensor):
assert "torch" in str(dtype), "dtype and input type does not match"
padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)
mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)
else: # np
assert "numpy" in str(dtype), "dtype and input type does not match"
padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)
mask = np.zeros((len(sequences), max_length), dtype=np.float32)
for idx, seq in enumerate(sequences):
end = lengths[idx]
padded_seqs[idx, :end] = seq
mask[idx, :end] = 1
return padded_seqs, mask # , lengths
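# Editor's sketch of the first doctest above (output worked out by hand):
#   padded_seqs, mask = pad_sequences_1d([[1, 2, 3], [1, 2], [3, 4, 7, 9]], dtype=torch.long)
#   padded_seqs -> [[1, 2, 3, 0], [1, 2, 0, 0], [3, 4, 7, 9]]
#   mask        -> [[1., 1., 1., 0.], [1., 1., 0., 0.], [1., 1., 1., 1.]]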
def pad_sequences_2d(sequences, dtype=torch.long):
""" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,
only allow the first two dims has variable lengths
Args:
sequences: list(n-d tensor or list)
dtype: torch.long for word indices / torch.float (float32) for other cases
Returns:
Examples:
>>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]
>>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])
>>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]
>>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])
>>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]
>>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])
# TODO add support for numpy array
"""
bsz = len(sequences)
para_lengths = [len(seq) for seq in sequences]
max_para_len = max(para_lengths)
sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]
max_sen_len = max([max(e) for e in sen_lengths])
if isinstance(sequences[0], torch.Tensor):
extra_dims = sequences[0].shape[2:]
elif isinstance(sequences[0][0], torch.Tensor):
extra_dims = sequences[0][0].shape[1:]
else:
sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]
extra_dims = ()
padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)
mask = torch.zeros(bsz, max_para_len, max_sen_len).float()
for b_i in range(bsz):
for sen_i, sen_l in enumerate(sen_lengths[b_i]):
padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]
mask[b_i, sen_i, :sen_l] = 1
return padded_seqs, mask # , sen_lengths
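# Editor's sketch of the third doctest above (output worked out by hand):
#   seqs = [[torch.randn(2, 4)], [torch.randn(3, 4), torch.randn(5, 4)]]
#   padded_seqs, mask = pad_sequences_2d(seqs, dtype=torch.float)
#   padded_seqs.shape -> (2, 2, 5, 4); mask[0] -> [[1,1,0,0,0], [0,0,0,0,0]],
#   mask[1] -> [[1,1,1,0,0], [1,1,1,1,1]]
# Caveat: the double-nested *list* fallback calls torch.Tensor(word_seq, dtype=dtype),
# but the stock torch.Tensor constructor takes no dtype argument, so only the
# tensor-input doctests run as written.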
def find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type="torch"):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]
Args:
st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities
ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities
top_n (int): return topN pairs with highest values
prob_thd (float):
tensor_type: str, np or torch
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
if tensor_type == "torch":
st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()
product = np.einsum("bm,bn->bmn", st_prob, ed_prob)
# (N, L, L) the lower part becomes zeros, start_idx < ed_idx
upper_product = np.triu(product, k=1)
return find_max_triples_from_upper_triangle_product(upper_product, top_n=top_n, prob_thd=prob_thd)
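# Editor's sketch (hypothetical one-element batch, numbers worked out by hand):
#   st_prob = np.array([[0.1, 0.9]]); ed_prob = np.array([[0.2, 0.8]])
#   find_max_triples(st_prob, ed_prob, top_n=1, tensor_type="np")
#   -> [array([[0., 1., 0.08]])]  # best span: start 0, end 1, conf 0.1 * 0.8
# np.triu(..., k=1) zeroes every pair with start_idx >= end_idx before ranking.
# Caveat: in the helper below, sorted_triple[sorted_triple[2] >= prob_thd]
# indexes the third row rather than the value column (sorted_triple[:, 2]),
# so thresholding is only reliable through the default prob_thd=None path.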
def find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):
""" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]
Args:
upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx
top_n (int): return topN pairs with highest values
prob_thd (float or None):
Returns:
batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]
"""
batched_sorted_triple = []
for idx, e in enumerate(upper_product):
sorted_triple = top_n_array_2d(e, top_n=top_n)
if prob_thd is not None:
sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]
batched_sorted_triple.append(sorted_triple)
return batched_sorted_triple
def top_n_array_2d(array_2d, top_n):
""" Get topN indices and values of a 2d array, return a tuple of indices and their values,
ranked by the value
"""
row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)
row_indices = row_indices[::-1][:top_n]
column_indices = column_indices[::-1][:top_n]
sorted_values = array_2d[row_indices, column_indices]
return np.stack([row_indices, column_indices, sorted_values], axis=1) # (N, 3)
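# Editor's sketch (hypothetical 2x2 input, output worked out by hand):
#   top_n_array_2d(np.array([[0.1, 0.9], [0.4, 0.3]]), top_n=2)
#   -> [[0., 1., 0.9],
#       [1., 0., 0.4]]  # each row is (row_idx, col_idx, value), best first
# Integer indices are cast to float by np.stack when mixed with float values.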
|
flexible
|
{
"blob_id": "788d9fa03c4311a8077d492b1a2b06d1f88826a3",
"index": 5570,
"step-1": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\n<mask token>\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, 
sorted_values], axis=1)\n",
"step-2": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], 
torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\n<mask token>\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-3": "<mask token>\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], 
torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\n 'torch'):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == 'torch':\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum('bm,bn->bmn', st_prob, ed_prob)\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product,\n top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-4": "import numpy as np\nimport torch\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device('cpu'\n ), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if 'torch' in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in\n sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n extra_dims = sequences[0].shape[1:]\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert 'torch' in str(dtype), 'dtype and input type does not match'\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.\n float32, device=device)\n else:\n assert 'numpy' in str(dtype), 'dtype and input type does not match'\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims,\n dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n if isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif 
isinstance(sequences[0][0], torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in\n seq] for seq in sequences]\n extra_dims = ()\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims,\n dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\n 'torch'):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == 'torch':\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum('bm,bn->bmn', st_prob, ed_prob)\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product,\n top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5,\n prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d,\n axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1)\n",
"step-5": "import numpy as np\nimport torch\n\n\ndef pad_sequences_1d(sequences, dtype=torch.long, device=torch.device(\"cpu\"), fixed_length=None):\n \"\"\" Pad a single-nested list or a sequence of n-d array (torch.tensor or np.ndarray)\n into a (n+1)-d array, only allow the first dim has variable lengths.\n Args:\n sequences: list(n-d tensor or list)\n dtype: np.dtype or torch.dtype\n device:\n fixed_length: pad all seq in sequences to fixed length. All seq should have a length <= fixed_length.\n return will be of shape [len(sequences), fixed_length, ...]\n Returns:\n padded_seqs: ((n+1)-d tensor) padded with zeros\n mask: (2d tensor) of the same shape as the first two dims of padded_seqs,\n 1 indicate valid, 0 otherwise\n Examples:\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=torch.long)\n >>> test_data_3d = [torch.randn(2,3,4), torch.randn(4,3,4), torch.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=torch.float)\n >>> test_data_list = [[1,2,3], [1,2], [3,4,7,9]]\n >>> pad_sequences_1d(test_data_list, dtype=np.float32)\n >>> test_data_3d = [np.random.randn(2,3,4), np.random.randn(4,3,4), np.random.randn(1,3,4)]\n >>> pad_sequences_1d(test_data_3d, dtype=np.float32)\n \"\"\"\n if isinstance(sequences[0], list):\n if \"torch\" in str(dtype):\n sequences = [torch.tensor(s, dtype=dtype, device=device) for s in sequences]\n else:\n sequences = [np.asarray(s, dtype=dtype) for s in sequences]\n\n extra_dims = sequences[0].shape[1:] # the extra dims should be the same for all elements\n lengths = [len(seq) for seq in sequences]\n if fixed_length is not None:\n max_length = fixed_length\n else:\n max_length = max(lengths)\n if isinstance(sequences[0], torch.Tensor):\n assert \"torch\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = torch.zeros((len(sequences), max_length) + extra_dims, dtype=dtype, device=device)\n mask = torch.zeros((len(sequences), max_length), dtype=torch.float32, device=device)\n else: # np\n assert \"numpy\" in str(dtype), \"dtype and input type does not match\"\n padded_seqs = np.zeros((len(sequences), max_length) + extra_dims, dtype=dtype)\n mask = np.zeros((len(sequences), max_length), dtype=np.float32)\n\n for idx, seq in enumerate(sequences):\n end = lengths[idx]\n padded_seqs[idx, :end] = seq\n mask[idx, :end] = 1\n return padded_seqs, mask # , lengths\n\n\ndef pad_sequences_2d(sequences, dtype=torch.long):\n \"\"\" Pad a double-nested list or a sequence of n-d torch tensor into a (n+1)-d tensor,\n only allow the first two dims has variable lengths\n Args:\n sequences: list(n-d tensor or list)\n dtype: torch.long for word indices / torch.float (float32) for other cases\n Returns:\n Examples:\n >>> test_data_list = [[[1, 3, 5], [3, 7, 4, 1]], [[98, 34, 11, 89, 90], [22], [34, 56]],]\n >>> pad_sequences_2d(test_data_list, dtype=torch.long) # torch.Size([2, 3, 5])\n >>> test_data_3d = [torch.randn(2,2,4), torch.randn(4,3,4), torch.randn(1,5,4)]\n >>> pad_sequences_2d(test_data_3d, dtype=torch.float) # torch.Size([2, 3, 5])\n >>> test_data_3d2 = [[torch.randn(2,4), ], [torch.randn(3,4), torch.randn(5,4)]]\n >>> pad_sequences_2d(test_data_3d2, dtype=torch.float) # torch.Size([2, 3, 5])\n # TODO add support for numpy array\n \"\"\"\n bsz = len(sequences)\n para_lengths = [len(seq) for seq in sequences]\n max_para_len = max(para_lengths)\n sen_lengths = [[len(word_seq) for word_seq in seq] for seq in sequences]\n max_sen_len = max([max(e) for e in sen_lengths])\n\n if 
isinstance(sequences[0], torch.Tensor):\n extra_dims = sequences[0].shape[2:]\n elif isinstance(sequences[0][0], torch.Tensor):\n extra_dims = sequences[0][0].shape[1:]\n else:\n sequences = [[torch.Tensor(word_seq, dtype=dtype) for word_seq in seq] for seq in sequences]\n extra_dims = ()\n\n padded_seqs = torch.zeros((bsz, max_para_len, max_sen_len) + extra_dims, dtype=dtype)\n mask = torch.zeros(bsz, max_para_len, max_sen_len).float()\n\n for b_i in range(bsz):\n for sen_i, sen_l in enumerate(sen_lengths[b_i]):\n padded_seqs[b_i, sen_i, :sen_l] = sequences[b_i][sen_i]\n mask[b_i, sen_i, :sen_l] = 1\n return padded_seqs, mask # , sen_lengths\n\n\ndef find_max_triples(st_prob, ed_prob, top_n=5, prob_thd=None, tensor_type=\"torch\"):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of st_prob[k1] * ed_prob[k2]\n Args:\n st_prob (torch.Tensor or np.ndarray): (N, L) batched start_idx probabilities\n ed_prob (torch.Tensor or np.ndarray): (N, L) batched end_idx probabilities\n top_n (int): return topN pairs with highest values\n prob_thd (float):\n tensor_type: str, np or torch\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n if tensor_type == \"torch\":\n st_prob, ed_prob = st_prob.data.numpy(), ed_prob.data.numpy()\n product = np.einsum(\"bm,bn->bmn\", st_prob, ed_prob)\n # (N, L, L) the lower part becomes zeros, start_idx < ed_idx\n upper_product = np.triu(product, k=1)\n return find_max_triples_from_upper_triangle_product(upper_product, top_n=top_n, prob_thd=prob_thd)\n\n\ndef find_max_triples_from_upper_triangle_product(upper_product, top_n=5, prob_thd=None):\n \"\"\" Find a list of (k1, k2) where k1 < k2 with the maximum values of p1[k1] * p2[k2]\n Args:\n upper_product (torch.Tensor or np.ndarray): (N, L, L), the lower part becomes zeros, end_idx > start_idx\n top_n (int): return topN pairs with highest values\n prob_thd (float or None):\n Returns:\n batched_sorted_triple: N * [(st_idx, ed_idx, confidence), ...]\n \"\"\"\n batched_sorted_triple = []\n for idx, e in enumerate(upper_product):\n sorted_triple = top_n_array_2d(e, top_n=top_n)\n if prob_thd is not None:\n sorted_triple = sorted_triple[sorted_triple[2] >= prob_thd]\n batched_sorted_triple.append(sorted_triple)\n return batched_sorted_triple\n\n\ndef top_n_array_2d(array_2d, top_n):\n \"\"\" Get topN indices and values of a 2d array, return a tuple of indices and their values,\n ranked by the value\n \"\"\"\n row_indices, column_indices = np.unravel_index(np.argsort(array_2d, axis=None), array_2d.shape)\n row_indices = row_indices[::-1][:top_n]\n column_indices = column_indices[::-1][:top_n]\n sorted_values = array_2d[row_indices, column_indices]\n return np.stack([row_indices, column_indices, sorted_values], axis=1) # (N, 3)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
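A quick usage sketch for the padding helpers defined in this record (a minimal sketch, assuming PyTorch is installed; pad_sequences_1d is the function shown above, and the input mirrors its docstring example):

import torch

seqs = [[1, 2, 3], [1, 2], [3, 4, 7, 9]]
padded, mask = pad_sequences_1d(seqs, dtype=torch.long)
print(padded.shape)  # torch.Size([3, 4]); shorter rows are zero-padded
print(mask)          # 1.0 marks real values, 0.0 marks padding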
from command import Command, is_command, CommandException
from event import Event
class ItemInfo(Command):
@is_command
def item_info(self, player, *args):
if len(args) == 0:
raise CommandException(CommandException.NOT_ENOUGH_ARGUMENTS)
item_id = args[0]
if item_id in player.inventory:
item = player.inventory[item_id]
elif item_id in player.location.lobjects:
item = player.location.lobjects[item_id]
else:
raise CommandException(CommandException.UNKNOWN_ITEM)
return Event('item-info', {"item": item.to_dict()})
|
normal
|
{
"blob_id": "6b2bd6954f188626fa857ffc37611d3f971d22e2",
"index": 5259,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ItemInfo(Command):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ItemInfo(Command):\n\n @is_command\n def item_info(self, player, *args):\n if len(args) == 0:\n raise CommandException(CommandException.NOT_ENOUGH_ARGUMENTS)\n item_id = args[0]\n if item_id in player.inventory:\n item = player.inventory[item_id]\n elif item_id in player.location.lobjects:\n item = player.location.lobjects[item_id]\n else:\n raise CommandException(CommandException.UNKNOWN_ITEM)\n return Event('item-info', {'item': item.to_dict()})\n",
"step-4": "from command import Command, is_command, CommandException\nfrom event import Event\n\n\nclass ItemInfo(Command):\n\n @is_command\n def item_info(self, player, *args):\n if len(args) == 0:\n raise CommandException(CommandException.NOT_ENOUGH_ARGUMENTS)\n item_id = args[0]\n if item_id in player.inventory:\n item = player.inventory[item_id]\n elif item_id in player.location.lobjects:\n item = player.location.lobjects[item_id]\n else:\n raise CommandException(CommandException.UNKNOWN_ITEM)\n return Event('item-info', {'item': item.to_dict()})\n",
"step-5": "from command import Command, is_command, CommandException\nfrom event import Event\n\n\nclass ItemInfo(Command):\n\n @is_command\n def item_info(self, player, *args):\n if len(args) == 0:\n raise CommandException(CommandException.NOT_ENOUGH_ARGUMENTS)\n item_id = args[0]\n if item_id in player.inventory:\n item = player.inventory[item_id]\n elif item_id in player.location.lobjects:\n item = player.location.lobjects[item_id]\n else:\n raise CommandException(CommandException.UNKNOWN_ITEM)\n\n return Event('item-info', {\"item\": item.to_dict()})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
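A hypothetical invocation sketch for the command above. FakeItem, FakeLocation, and FakePlayer are stand-ins I am assuming (they carry only the attributes the command reads), and the sketch assumes is_command and Command's constructor are pass-through here:

class FakeItem:
    def to_dict(self):
        return {'id': 'sword', 'name': 'Rusty Sword'}

class FakeLocation:
    lobjects = {}

class FakePlayer:
    inventory = {'sword': FakeItem()}
    location = FakeLocation()

event = ItemInfo().item_info(FakePlayer(), 'sword')
# -> Event('item-info', {'item': {'id': 'sword', 'name': 'Rusty Sword'}})
# calling it with no args raises CommandException(NOT_ENOUGH_ARGUMENTS)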
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 17 13:07:47 2020
@author: mmm
"""
n = 2
n1 = 10
for i in range(n, n1):
    if i > 1:
        for j in range(2, i):
            if i % j == 0:
                break
        else:
            # loop ended without finding a divisor, so i is prime
            print(i)
|
normal
|
{
"blob_id": "1855351b20c7965a29864502e4489ab4324c7859",
"index": 4808,
"step-1": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Dec 17 13:07:47 2020\r\n\r\n@author: mmm\r\n\"\"\"\r\n\r\n\r\nn = 2\r\nn1 = 10\r\nfor i in range(n,n1):\r\n if n > 1:\r\n for j in range(2,i):\r\n if (i % j!= 0):\r\n \r\n else:\r\n print(i)\r\n \r\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
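For reference, the same trial-division check wrapped as a reusable function (standard approach, runnable as-is):

def is_prime(k: int) -> bool:
    if k < 2:
        return False
    for j in range(2, k):
        if k % j == 0:
            return False
    return True

print([k for k in range(2, 10) if is_prime(k)])  # [2, 3, 5, 7]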
from keras.models import Sequential
from keras.layers import Convolution2D # for 2d images
from keras.layers import MaxPool2D
from keras.layers import Flatten
from keras.layers import Dense
import tensorflow as tf
from keras_preprocessing.image import ImageDataGenerator
cnn = Sequential()
rgb = 64
# step 1: convolution
# slide feature detectors ("filters") along image
# results feature maps that form convolutional layer
cnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu')) # 32, 3x3 filters
# step 2: pooling
cnn.add(MaxPool2D(pool_size=(2, 2)))
# step 3: flatten
# this vector will be the input of a future ann
cnn.add(Flatten())
# step 4: full connection
cnn.add(Dense(output_dim=128, activation='relu')) # add hidden layers
cnn.add(Dense(output_dim=1, activation='sigmoid')) # sigmoid for binary output
# compile cnn
cnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# image augmentation - prevent overfitting
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1./255)
train_set = train_datagen.flow_from_directory(
'dataset/training_set',
target_size=(rgb, rgb),
batch_size=32,
class_mode='binary')
test_set = test_datagen.flow_from_directory(
'dataset/test_set',
target_size=(rgb, rgb),
batch_size=32,
class_mode='binary')
cnn.fit_generator(
train_set,
        steps_per_epoch=8000, # counts batches per epoch, not images: 8k training images / batch 32 ≈ 250 batches per pass
epochs=10,
validation_data=test_set,
validation_steps=2000)
print(cnn.summary())
cnn.save('CatDogModel.h5')
|
normal
|
{
"blob_id": "9fa5f4b4aeb7fe42d313a0ec4e57ce15acbfcf46",
"index": 3960,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ncnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu'))\ncnn.add(MaxPool2D(pool_size=(2, 2)))\ncnn.add(Flatten())\ncnn.add(Dense(output_dim=128, activation='relu'))\ncnn.add(Dense(output_dim=1, activation='sigmoid'))\ncnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n<mask token>\ncnn.fit_generator(train_set, steps_per_epoch=8000, epochs=10,\n validation_data=test_set, validation_steps=2000)\nprint(cnn.summary())\ncnn.save('CatDogModel.h5')\n",
"step-3": "<mask token>\ncnn = Sequential()\nrgb = 64\ncnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu'))\ncnn.add(MaxPool2D(pool_size=(2, 2)))\ncnn.add(Flatten())\ncnn.add(Dense(output_dim=128, activation='relu'))\ncnn.add(Dense(output_dim=1, activation='sigmoid'))\ncnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2,\n zoom_range=0.2, horizontal_flip=True)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntrain_set = train_datagen.flow_from_directory('dataset/training_set',\n target_size=(rgb, rgb), batch_size=32, class_mode='binary')\ntest_set = test_datagen.flow_from_directory('dataset/test_set', target_size\n =(rgb, rgb), batch_size=32, class_mode='binary')\ncnn.fit_generator(train_set, steps_per_epoch=8000, epochs=10,\n validation_data=test_set, validation_steps=2000)\nprint(cnn.summary())\ncnn.save('CatDogModel.h5')\n",
"step-4": "from keras.models import Sequential\nfrom keras.layers import Convolution2D\nfrom keras.layers import MaxPool2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nimport tensorflow as tf\nfrom keras_preprocessing.image import ImageDataGenerator\ncnn = Sequential()\nrgb = 64\ncnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu'))\ncnn.add(MaxPool2D(pool_size=(2, 2)))\ncnn.add(Flatten())\ncnn.add(Dense(output_dim=128, activation='relu'))\ncnn.add(Dense(output_dim=1, activation='sigmoid'))\ncnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, shear_range=0.2,\n zoom_range=0.2, horizontal_flip=True)\ntest_datagen = ImageDataGenerator(rescale=1.0 / 255)\ntrain_set = train_datagen.flow_from_directory('dataset/training_set',\n target_size=(rgb, rgb), batch_size=32, class_mode='binary')\ntest_set = test_datagen.flow_from_directory('dataset/test_set', target_size\n =(rgb, rgb), batch_size=32, class_mode='binary')\ncnn.fit_generator(train_set, steps_per_epoch=8000, epochs=10,\n validation_data=test_set, validation_steps=2000)\nprint(cnn.summary())\ncnn.save('CatDogModel.h5')\n",
"step-5": "from keras.models import Sequential\nfrom keras.layers import Convolution2D # for 2d images\nfrom keras.layers import MaxPool2D\nfrom keras.layers import Flatten\nfrom keras.layers import Dense\nimport tensorflow as tf\nfrom keras_preprocessing.image import ImageDataGenerator\n\ncnn = Sequential()\n\nrgb = 64\n\n# step 1: convolution\n# slide feature detectors (\"filters\") along image\n# results feature maps that form convolutional layer\ncnn.add(Convolution2D(32, 3, 3, input_shape=(rgb, rgb, 3), activation='relu')) # 32, 3x3 filters\n\n# step 2: pooling\ncnn.add(MaxPool2D(pool_size=(2, 2)))\n\n# step 3: flatten\n# this vector will be the input of a future ann\ncnn.add(Flatten())\n\n# step 4: full connection\ncnn.add(Dense(output_dim=128, activation='relu')) # add hidden layers\ncnn.add(Dense(output_dim=1, activation='sigmoid')) # sigmoid for binary output\n\n# compile cnn\ncnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])\n\n# image augmentation - prevent overfitting\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_set = train_datagen.flow_from_directory(\n 'dataset/training_set',\n target_size=(rgb, rgb),\n batch_size=32,\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n 'dataset/test_set',\n target_size=(rgb, rgb),\n batch_size=32,\n class_mode='binary')\n\ncnn.fit_generator(\n train_set,\n steps_per_epoch=8000, # we have 8k images in our training set\n epochs=10,\n validation_data=test_set,\n validation_steps=2000)\n\nprint(cnn.summary())\n\ncnn.save('CatDogModel.h5')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
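A minimal inference sketch for the saved model (assumes training above produced CatDogModel.h5 and that a test image exists at the hypothetical path below; the cat/dog index mapping follows flow_from_directory's alphabetical class ordering):

from keras.models import load_model
from keras_preprocessing import image
import numpy as np

model = load_model('CatDogModel.h5')
img = image.load_img('dataset/single_prediction/sample.jpg', target_size=(64, 64))
arr = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # (1, 64, 64, 3), rescaled like the training data
prob = model.predict(arr)[0][0]  # sigmoid output in [0, 1]
print('dog' if prob > 0.5 else 'cat')  # class 1 = 'dogs' if the class folders sort alphabetically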
<|reserved_special_token_0|>
def main():
if IS_TRAINING:
training_imgs_paths = list_images(TRAINING_IMGS_PATH)
train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,
autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,
logging_period=LOGGING_PERIOD)
print('\n>>>>>> Successfully done training...\n')
else:
contents_path = list_images(CONTENTS_DIR)
styles_path = list_images(STYLES_DIR)
model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
stylize(contents_path, styles_path, OUTPUT_DIR,
ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,
repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=
AUTUENCODER_LEVELS_INFER)
print('\n>>>>>> Successfully done stylizing...\n')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main():
if IS_TRAINING:
training_imgs_paths = list_images(TRAINING_IMGS_PATH)
train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,
autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,
logging_period=LOGGING_PERIOD)
print('\n>>>>>> Successfully done training...\n')
else:
contents_path = list_images(CONTENTS_DIR)
styles_path = list_images(STYLES_DIR)
model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
stylize(contents_path, styles_path, OUTPUT_DIR,
ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,
repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=
AUTUENCODER_LEVELS_INFER)
print('\n>>>>>> Successfully done stylizing...\n')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
IS_TRAINING = True
TRAINING_IMGS_PATH = 'MS_COCO'
ENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'
MODEL_SAVE_PATH = 'models/autoencoder'
MODEL_SAVE_SUFFIX = '-done'
DEBUG = True
LOGGING_PERIOD = 10
AUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]
CONTENTS_DIR = 'images/content'
STYLES_DIR = 'images/style'
OUTPUT_DIR = 'outputs'
STYLE_RATIO = 0.8
REPEAT_PIPELINE = 1
AUTUENCODER_LEVELS_INFER = [3, 2, 1]
def main():
if IS_TRAINING:
training_imgs_paths = list_images(TRAINING_IMGS_PATH)
train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,
autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,
logging_period=LOGGING_PERIOD)
print('\n>>>>>> Successfully done training...\n')
else:
contents_path = list_images(CONTENTS_DIR)
styles_path = list_images(STYLES_DIR)
model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
stylize(contents_path, styles_path, OUTPUT_DIR,
ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,
repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=
AUTUENCODER_LEVELS_INFER)
print('\n>>>>>> Successfully done stylizing...\n')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
from __future__ import print_function
from train import train
from infer import stylize
from utils import list_images
IS_TRAINING = True
TRAINING_IMGS_PATH = 'MS_COCO'
ENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'
MODEL_SAVE_PATH = 'models/autoencoder'
MODEL_SAVE_SUFFIX = '-done'
DEBUG = True
LOGGING_PERIOD = 10
AUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]
CONTENTS_DIR = 'images/content'
STYLES_DIR = 'images/style'
OUTPUT_DIR = 'outputs'
STYLE_RATIO = 0.8
REPEAT_PIPELINE = 1
AUTUENCODER_LEVELS_INFER = [3, 2, 1]
def main():
if IS_TRAINING:
training_imgs_paths = list_images(TRAINING_IMGS_PATH)
train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,
autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,
logging_period=LOGGING_PERIOD)
print('\n>>>>>> Successfully done training...\n')
else:
contents_path = list_images(CONTENTS_DIR)
styles_path = list_images(STYLES_DIR)
model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
stylize(contents_path, styles_path, OUTPUT_DIR,
ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,
repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=
AUTUENCODER_LEVELS_INFER)
print('\n>>>>>> Successfully done stylizing...\n')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
# Demo - train the decoders & use them to stylize image
from __future__ import print_function
from train import train
from infer import stylize
from utils import list_images
IS_TRAINING = True
# for training
TRAINING_IMGS_PATH = 'MS_COCO'
ENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'
MODEL_SAVE_PATH = 'models/autoencoder'
MODEL_SAVE_SUFFIX = '-done'
DEBUG = True
LOGGING_PERIOD = 10
AUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]
# for inferring (stylize)
CONTENTS_DIR = 'images/content'
STYLES_DIR = 'images/style'
OUTPUT_DIR = 'outputs'
STYLE_RATIO = 0.8
REPEAT_PIPELINE = 1
AUTUENCODER_LEVELS_INFER = [3, 2, 1]
def main():
if IS_TRAINING:
training_imgs_paths = list_images(TRAINING_IMGS_PATH)
train(training_imgs_paths,
ENCODER_WEIGHTS_PATH,
MODEL_SAVE_PATH,
autoencoder_levels=AUTUENCODER_LEVELS_TRAIN,
debug=DEBUG,
logging_period=LOGGING_PERIOD)
print('\n>>>>>> Successfully done training...\n')
else:
contents_path = list_images(CONTENTS_DIR)
styles_path = list_images(STYLES_DIR)
model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX
stylize(contents_path,
styles_path,
OUTPUT_DIR,
ENCODER_WEIGHTS_PATH,
model_path,
style_ratio=STYLE_RATIO,
repeat_pipeline=REPEAT_PIPELINE,
autoencoder_levels=AUTUENCODER_LEVELS_INFER)
print('\n>>>>>> Successfully done stylizing...\n')
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "31ed798118f20005b5a26bc1fc0053b7d0a95657",
"index": 5366,
"step-1": "<mask token>\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nIS_TRAINING = True\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from __future__ import print_function\nfrom train import train\nfrom infer import stylize\nfrom utils import list_images\nIS_TRAINING = True\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n train(training_imgs_paths, ENCODER_WEIGHTS_PATH, MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN, debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n print('\\n>>>>>> Successfully done training...\\n')\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n stylize(contents_path, styles_path, OUTPUT_DIR,\n ENCODER_WEIGHTS_PATH, model_path, style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE, autoencoder_levels=\n AUTUENCODER_LEVELS_INFER)\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# Demo - train the decoders & use them to stylize image\n\nfrom __future__ import print_function\n\nfrom train import train\nfrom infer import stylize\nfrom utils import list_images\n\n\nIS_TRAINING = True\n\n# for training\nTRAINING_IMGS_PATH = 'MS_COCO'\nENCODER_WEIGHTS_PATH = 'vgg19_normalised.npz'\nMODEL_SAVE_PATH = 'models/autoencoder'\nMODEL_SAVE_SUFFIX = '-done'\n\nDEBUG = True\nLOGGING_PERIOD = 10\nAUTUENCODER_LEVELS_TRAIN = [5, 4, 3, 2, 1]\n\n# for inferring (stylize)\nCONTENTS_DIR = 'images/content'\nSTYLES_DIR = 'images/style'\nOUTPUT_DIR = 'outputs'\n\nSTYLE_RATIO = 0.8\nREPEAT_PIPELINE = 1\nAUTUENCODER_LEVELS_INFER = [3, 2, 1]\n\n\ndef main():\n\n if IS_TRAINING:\n training_imgs_paths = list_images(TRAINING_IMGS_PATH)\n\n train(training_imgs_paths,\n ENCODER_WEIGHTS_PATH,\n MODEL_SAVE_PATH,\n autoencoder_levels=AUTUENCODER_LEVELS_TRAIN,\n debug=DEBUG,\n logging_period=LOGGING_PERIOD)\n \n print('\\n>>>>>> Successfully done training...\\n')\n\n else:\n contents_path = list_images(CONTENTS_DIR)\n styles_path = list_images(STYLES_DIR)\n model_path = MODEL_SAVE_PATH + MODEL_SAVE_SUFFIX\n\n stylize(contents_path, \n styles_path, \n OUTPUT_DIR, \n ENCODER_WEIGHTS_PATH, \n model_path, \n style_ratio=STYLE_RATIO,\n repeat_pipeline=REPEAT_PIPELINE,\n autoencoder_levels=AUTUENCODER_LEVELS_INFER)\n\n print('\\n>>>>>> Successfully done stylizing...\\n')\n\n\nif __name__ == '__main__':\n main()\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def my_loss():
return nn.CrossEntropyLoss()
<|reserved_special_token_1|>
import torch.nn as nn
def my_loss():
return nn.CrossEntropyLoss()
|
flexible
|
{
"blob_id": "418f2e1cbe4fb3ef369e981e72bf40eeddfd052e",
"index": 2408,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef my_loss():\n return nn.CrossEntropyLoss()\n",
"step-3": "import torch.nn as nn\n\n\ndef my_loss():\n return nn.CrossEntropyLoss()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
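Usage follows the standard CrossEntropyLoss pattern: raw (unnormalized) logits plus integer class targets:

import torch

criterion = my_loss()
logits = torch.randn(4, 3)            # batch of 4 samples, 3 classes
targets = torch.tensor([0, 2, 1, 1])  # class indices, not one-hot
loss = criterion(logits, targets)
print(loss.item())                    # scalar mean loss over the batch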
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
unet_model = UnetModel(args)
unet_model.prepare_data(args)
unet_model.create_model(args)
unet_model.train(args)
unet_model.load_best_model(args, load_dir=args.savedir)
unet_model.evaluate(args)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def main(args):
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
unet_model = UnetModel(args)
unet_model.prepare_data(args)
unet_model.create_model(args)
unet_model.train(args)
unet_model.load_best_model(args, load_dir=args.savedir)
unet_model.evaluate(args)
if __name__ == '__main__':
main(args)
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
from arg_parser import args
from model_object import UnetModel
def main(args):
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
unet_model = UnetModel(args)
unet_model.prepare_data(args)
unet_model.create_model(args)
unet_model.train(args)
unet_model.load_best_model(args, load_dir=args.savedir)
unet_model.evaluate(args)
if __name__ == '__main__':
main(args)
<|reserved_special_token_1|>
import numpy as np
import tensorflow as tf
from arg_parser import args
from model_object import UnetModel
def main(args):
np.random.seed(args.random_seed)
tf.random.set_seed(args.random_seed)
unet_model = UnetModel(args)
unet_model.prepare_data(args)
unet_model.create_model(args)
unet_model.train(args)
unet_model.load_best_model(args, load_dir= args.savedir)
unet_model.evaluate(args)
if __name__ == "__main__":
main(args)
|
flexible
|
{
"blob_id": "588f6f78908e47e0b3f1bc42fffabad34766eede",
"index": 9815,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nfrom arg_parser import args\nfrom model_object import UnetModel\n\n\ndef main(args):\n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n unet_model = UnetModel(args)\n unet_model.prepare_data(args)\n unet_model.create_model(args)\n unet_model.train(args)\n unet_model.load_best_model(args, load_dir=args.savedir)\n unet_model.evaluate(args)\n\n\nif __name__ == '__main__':\n main(args)\n",
"step-5": "import numpy as np\nimport tensorflow as tf\n\nfrom arg_parser import args\nfrom model_object import UnetModel\n\ndef main(args):\n \n np.random.seed(args.random_seed)\n tf.random.set_seed(args.random_seed)\n\n unet_model = UnetModel(args) \n\n unet_model.prepare_data(args)\n\n unet_model.create_model(args)\n\n unet_model.train(args)\n\n unet_model.load_best_model(args, load_dir= args.savedir)\n\n unet_model.evaluate(args)\n\nif __name__ == \"__main__\":\n main(args)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
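The seeding idiom used in main(), shown standalone (the local arg_parser/model_object modules are not needed for this part):

import numpy as np
import tensorflow as tf

def set_seeds(seed: int) -> None:
    np.random.seed(seed)      # NumPy RNG (data shuffling, augmentation)
    tf.random.set_seed(seed)  # TensorFlow graph-level RNG (weight init, dropout)

set_seeds(42)
print(tf.random.uniform([2]))  # same values on every fresh run with the same seed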
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_astype_invalid_nas_to_tdt64_raises():
idx = Index([NaT.asm8] * 2, dtype=object)
msg = 'Cannot cast Index to dtype timedelta64\\[ns\\]'
with pytest.raises(TypeError, match=msg):
idx.astype('m8[ns]')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def test_astype_str_from_bytes():
idx = Index(['あ', b'a'], dtype='object')
result = idx.astype(str)
expected = Index(['あ', 'a'], dtype='object')
tm.assert_index_equal(result, expected)
def test_astype_invalid_nas_to_tdt64_raises():
idx = Index([NaT.asm8] * 2, dtype=object)
msg = 'Cannot cast Index to dtype timedelta64\\[ns\\]'
with pytest.raises(TypeError, match=msg):
idx.astype('m8[ns]')
<|reserved_special_token_1|>
import pytest
from pandas import Index, NaT
import pandas._testing as tm
def test_astype_str_from_bytes():
idx = Index(['あ', b'a'], dtype='object')
result = idx.astype(str)
expected = Index(['あ', 'a'], dtype='object')
tm.assert_index_equal(result, expected)
def test_astype_invalid_nas_to_tdt64_raises():
idx = Index([NaT.asm8] * 2, dtype=object)
msg = 'Cannot cast Index to dtype timedelta64\\[ns\\]'
with pytest.raises(TypeError, match=msg):
idx.astype('m8[ns]')
<|reserved_special_token_1|>
import pytest
from pandas import (
Index,
NaT,
)
import pandas._testing as tm
def test_astype_str_from_bytes():
# https://github.com/pandas-dev/pandas/issues/38607
idx = Index(["あ", b"a"], dtype="object")
result = idx.astype(str)
expected = Index(["あ", "a"], dtype="object")
tm.assert_index_equal(result, expected)
def test_astype_invalid_nas_to_tdt64_raises():
# GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT
idx = Index([NaT.asm8] * 2, dtype=object)
msg = r"Cannot cast Index to dtype timedelta64\[ns\]"
with pytest.raises(TypeError, match=msg):
idx.astype("m8[ns]")
|
flexible
|
{
"blob_id": "13b2fea09f5a4300563dd8870fe1841b47756b36",
"index": 9972,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-3": "<mask token>\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-4": "import pytest\nfrom pandas import Index, NaT\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n idx = Index(['あ', b'a'], dtype='object')\n result = idx.astype(str)\n expected = Index(['あ', 'a'], dtype='object')\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n idx = Index([NaT.asm8] * 2, dtype=object)\n msg = 'Cannot cast Index to dtype timedelta64\\\\[ns\\\\]'\n with pytest.raises(TypeError, match=msg):\n idx.astype('m8[ns]')\n",
"step-5": "import pytest\n\nfrom pandas import (\n Index,\n NaT,\n)\nimport pandas._testing as tm\n\n\ndef test_astype_str_from_bytes():\n # https://github.com/pandas-dev/pandas/issues/38607\n idx = Index([\"あ\", b\"a\"], dtype=\"object\")\n result = idx.astype(str)\n expected = Index([\"あ\", \"a\"], dtype=\"object\")\n tm.assert_index_equal(result, expected)\n\n\ndef test_astype_invalid_nas_to_tdt64_raises():\n # GH#45722 don't cast np.datetime64 NaTs to timedelta64 NaT\n idx = Index([NaT.asm8] * 2, dtype=object)\n\n msg = r\"Cannot cast Index to dtype timedelta64\\[ns\\]\"\n with pytest.raises(TypeError, match=msg):\n idx.astype(\"m8[ns]\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
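The behaviors these tests pin down, run directly (a sketch; the exact error message text can vary across pandas versions):

import pandas as pd

idx = pd.Index(['あ', b'a'], dtype='object')
print(idx.astype(str))  # Index(['あ', 'a'], dtype='object'): bytes are decoded, not repr'd

idx2 = pd.Index([pd.NaT.asm8] * 2, dtype=object)
try:
    idx2.astype('m8[ns]')
except TypeError as err:
    print(err)  # Cannot cast Index to dtype timedelta64[ns]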
<|reserved_special_token_0|>
class Database(object):
def __init__(self):
self.client = MongoClient(config['db']['url'])
self.db = self.client[config['db']['name']]
<|reserved_special_token_0|>
def find(self, criteria, collection_name, projection=None, sort=None,
limit=0, cursor=False):
if '_id' in criteria:
criteria['_id'] = ObjectId(criteria['_id'])
found = self.db[collection_name].find(filter=criteria, projection=
projection, limit=limit, sort=sort)
if cursor:
return found
found = list(found)
for i in range(len(found)):
if '_id' in found[i]:
found[i]['_id'] = str(found[i]['_id'])
return found
def find_by_id(self, id, collection_name):
found = self.db[collection_name].find_one({'_id': ObjectId(id)})
if found is None:
return not found
if '_id' in found:
found['_id'] = str(found['_id'])
return found
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Database(object):
def __init__(self):
self.client = MongoClient(config['db']['url'])
self.db = self.client[config['db']['name']]
<|reserved_special_token_0|>
def find(self, criteria, collection_name, projection=None, sort=None,
limit=0, cursor=False):
if '_id' in criteria:
criteria['_id'] = ObjectId(criteria['_id'])
found = self.db[collection_name].find(filter=criteria, projection=
projection, limit=limit, sort=sort)
if cursor:
return found
found = list(found)
for i in range(len(found)):
if '_id' in found[i]:
found[i]['_id'] = str(found[i]['_id'])
return found
def find_by_id(self, id, collection_name):
found = self.db[collection_name].find_one({'_id': ObjectId(id)})
if found is None:
return not found
if '_id' in found:
found['_id'] = str(found['_id'])
return found
def update(self, id, element, collection_name):
criteria = {'_id': ObjectId(id)}
element['updated'] = datetime.now()
set_obj = {'$set': element}
updated = self.db[collection_name].update_one(criteria, set_obj)
if updated.matched_count == 1:
return 'Record Successfully Updated'
def delete(self, id, collection_name):
deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})
return bool(deleted.deleted_count)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Database(object):
def __init__(self):
self.client = MongoClient(config['db']['url'])
self.db = self.client[config['db']['name']]
def insert(self, element, collection_name):
element['created'] = datetime.now()
element['updated'] = datetime.now()
inserted = self.db[collection_name].insert_one(element)
return str(inserted.inserted_id)
def find(self, criteria, collection_name, projection=None, sort=None,
limit=0, cursor=False):
if '_id' in criteria:
criteria['_id'] = ObjectId(criteria['_id'])
found = self.db[collection_name].find(filter=criteria, projection=
projection, limit=limit, sort=sort)
if cursor:
return found
found = list(found)
for i in range(len(found)):
if '_id' in found[i]:
found[i]['_id'] = str(found[i]['_id'])
return found
def find_by_id(self, id, collection_name):
found = self.db[collection_name].find_one({'_id': ObjectId(id)})
if found is None:
return not found
if '_id' in found:
found['_id'] = str(found['_id'])
return found
def update(self, id, element, collection_name):
criteria = {'_id': ObjectId(id)}
element['updated'] = datetime.now()
set_obj = {'$set': element}
updated = self.db[collection_name].update_one(criteria, set_obj)
if updated.matched_count == 1:
return 'Record Successfully Updated'
def delete(self, id, collection_name):
deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})
return bool(deleted.deleted_count)
<|reserved_special_token_1|>
from datetime import datetime
from pymongo import MongoClient
from bson import ObjectId
from config import config
class Database(object):
def __init__(self):
self.client = MongoClient(config['db']['url'])
self.db = self.client[config['db']['name']]
def insert(self, element, collection_name):
element['created'] = datetime.now()
element['updated'] = datetime.now()
inserted = self.db[collection_name].insert_one(element)
return str(inserted.inserted_id)
def find(self, criteria, collection_name, projection=None, sort=None,
limit=0, cursor=False):
if '_id' in criteria:
criteria['_id'] = ObjectId(criteria['_id'])
found = self.db[collection_name].find(filter=criteria, projection=
projection, limit=limit, sort=sort)
if cursor:
return found
found = list(found)
for i in range(len(found)):
if '_id' in found[i]:
found[i]['_id'] = str(found[i]['_id'])
return found
def find_by_id(self, id, collection_name):
found = self.db[collection_name].find_one({'_id': ObjectId(id)})
if found is None:
return not found
if '_id' in found:
found['_id'] = str(found['_id'])
return found
def update(self, id, element, collection_name):
criteria = {'_id': ObjectId(id)}
element['updated'] = datetime.now()
set_obj = {'$set': element}
updated = self.db[collection_name].update_one(criteria, set_obj)
if updated.matched_count == 1:
return 'Record Successfully Updated'
def delete(self, id, collection_name):
deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})
return bool(deleted.deleted_count)
<|reserved_special_token_1|>
from datetime import datetime
from pymongo import MongoClient
from bson import ObjectId
from config import config
class Database(object):
def __init__(self):
self.client = MongoClient(config['db']['url']) # configure db url
self.db = self.client[config['db']['name']] # configure db name
def insert(self, element, collection_name):
element["created"] = datetime.now()
element["updated"] = datetime.now()
inserted = self.db[collection_name].insert_one(element) # insert data to db
return str(inserted.inserted_id)
def find(self, criteria, collection_name, projection=None, sort=None, limit=0, cursor=False): # find all from db
if "_id" in criteria:
criteria["_id"] = ObjectId(criteria["_id"])
found = self.db[collection_name].find(filter=criteria, projection=projection, limit=limit, sort=sort)
if cursor:
return found
found = list(found)
for i in range(len(found)): # to serialize object id need to convert string
if "_id" in found[i]:
found[i]["_id"] = str(found[i]["_id"])
return found
def find_by_id(self, id, collection_name):
found = self.db[collection_name].find_one({"_id": ObjectId(id)})
        if found is None:
            return not found  # NB: evaluates to True when no document matches this id
if "_id" in found:
found["_id"] = str(found["_id"])
return found
def update(self, id, element, collection_name):
criteria = {"_id": ObjectId(id)}
element["updated"] = datetime.now()
set_obj = {"$set": element} # update value
updated = self.db[collection_name].update_one(criteria, set_obj)
if updated.matched_count == 1:
return "Record Successfully Updated"
def delete(self, id, collection_name):
deleted = self.db[collection_name].delete_one({"_id": ObjectId(id)})
return bool(deleted.deleted_count)
|
flexible
|
{
"blob_id": "bcc76e4dbcc191e7912085cbb92c5b0ebd2b047b",
"index": 6550,
"step-1": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n <mask token>\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n <mask token>\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-3": "<mask token>\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n\n def insert(self, element, collection_name):\n element['created'] = datetime.now()\n element['updated'] = datetime.now()\n inserted = self.db[collection_name].insert_one(element)\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-4": "from datetime import datetime\nfrom pymongo import MongoClient\nfrom bson import ObjectId\nfrom config import config\n\n\nclass Database(object):\n\n def __init__(self):\n self.client = MongoClient(config['db']['url'])\n self.db = self.client[config['db']['name']]\n\n def insert(self, element, collection_name):\n element['created'] = datetime.now()\n element['updated'] = datetime.now()\n inserted = self.db[collection_name].insert_one(element)\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None,\n limit=0, cursor=False):\n if '_id' in criteria:\n criteria['_id'] = ObjectId(criteria['_id'])\n found = self.db[collection_name].find(filter=criteria, projection=\n projection, limit=limit, sort=sort)\n if cursor:\n return found\n found = list(found)\n for i in range(len(found)):\n if '_id' in found[i]:\n found[i]['_id'] = str(found[i]['_id'])\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({'_id': ObjectId(id)})\n if found is None:\n return not found\n if '_id' in found:\n found['_id'] = str(found['_id'])\n return found\n\n def update(self, id, element, collection_name):\n criteria = {'_id': ObjectId(id)}\n element['updated'] = datetime.now()\n set_obj = {'$set': element}\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return 'Record Successfully Updated'\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({'_id': ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-5": "from datetime import datetime\nfrom pymongo import MongoClient\nfrom bson import ObjectId\n\nfrom config import config\n\n\nclass Database(object):\n def __init__(self):\n self.client = MongoClient(config['db']['url']) # configure db url\n self.db = self.client[config['db']['name']] # configure db name\n\n def insert(self, element, collection_name):\n element[\"created\"] = datetime.now()\n element[\"updated\"] = datetime.now()\n inserted = self.db[collection_name].insert_one(element) # insert data to db\n return str(inserted.inserted_id)\n\n def find(self, criteria, collection_name, projection=None, sort=None, limit=0, cursor=False): # find all from db\n\n if \"_id\" in criteria:\n criteria[\"_id\"] = ObjectId(criteria[\"_id\"])\n\n found = self.db[collection_name].find(filter=criteria, projection=projection, limit=limit, sort=sort)\n\n if cursor:\n return found\n\n found = list(found)\n\n for i in range(len(found)): # to serialize object id need to convert string\n if \"_id\" in found[i]:\n found[i][\"_id\"] = str(found[i][\"_id\"])\n\n return found\n\n def find_by_id(self, id, collection_name):\n found = self.db[collection_name].find_one({\"_id\": ObjectId(id)})\n \n if found is None:\n return not found\n \n if \"_id\" in found:\n found[\"_id\"] = str(found[\"_id\"])\n\n return found\n\n def update(self, id, element, collection_name):\n criteria = {\"_id\": ObjectId(id)}\n\n element[\"updated\"] = datetime.now()\n set_obj = {\"$set\": element} # update value\n\n updated = self.db[collection_name].update_one(criteria, set_obj)\n if updated.matched_count == 1:\n return \"Record Successfully Updated\"\n\n def delete(self, id, collection_name):\n deleted = self.db[collection_name].delete_one({\"_id\": ObjectId(id)})\n return bool(deleted.deleted_count)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
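A minimal usage sketch for the class above (assumes a reachable MongoDB instance and a config shaped like {'db': {'url': 'mongodb://localhost:27017', 'name': 'mydb'}}):

db = Database()
new_id = db.insert({'title': 'first post'}, 'posts')   # returns the new _id as a str
print(db.find_by_id(new_id, 'posts'))                  # doc with created/updated timestamps
print(db.find({'title': 'first post'}, 'posts'))       # list with _id serialized to str
db.update(new_id, {'title': 'edited'}, 'posts')        # 'Record Successfully Updated'
print(db.delete(new_id, 'posts'))                      # True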
<|reserved_special_token_0|>
@fixture
def authenticated_author_client(user, client: Client) ->Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@fixture
def user(db):
return User.objects.create(username='test', email=
'[email protected]', password='test')
@fixture
def authenticated_author_client(user, client: Client) ->Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@fixture
def user(db):
return User.objects.create(username='test', email=
'[email protected]', password='test')
@fixture
def authenticated_author_client(user, client: Client) ->Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
@fixture
def todo(db, user):
return ToDoList.objects.create(user=user, title='Test task',
description='Uchet kz test task', deadline=localdate(), executed=False)
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django.test import Client
from django.utils.timezone import localdate
from pytest import fixture
from rest_framework.authtoken.models import Token  # assumed: DRF authtoken provides the Token used below
from operations.models import ToDoList
@fixture
def user(db):
return User.objects.create(username='test', email=
'[email protected]', password='test')
@fixture
def authenticated_author_client(user, client: Client) ->Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
@fixture
def todo(db, user):
return ToDoList.objects.create(user=user, title='Test task',
description='Uchet kz test task', deadline=localdate(), executed=False)
<|reserved_special_token_1|>
from django.contrib.auth.models import User
from django.test import Client
from django.utils.timezone import localdate
from pytest import fixture
from rest_framework.authtoken.models import Token  # assumed: DRF authtoken provides the Token used below
from operations.models import ToDoList
@fixture
def user(db):
return User.objects.create(
username='test', email='[email protected]',
password='test',
)
@fixture
def authenticated_author_client(
user, client: Client
) -> Client:
token = Token.objects.get_or_create(user=user)[0].key
client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'
print(client)
return client
@fixture
def todo(db, user):
return ToDoList.objects.create(
user=user,
title='Test task',
description='Uchet kz test task',
deadline=localdate(),
executed=False
)
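# Minimal usage sketch (an addition, not from the original conftest):
# a test consuming these fixtures could look like this, assuming a
# hypothetical "/api/todos/" endpoint.
def test_list_todos(authenticated_author_client, todo):
    response = authenticated_author_client.get('/api/todos/')
    assert response.status_code == 200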
|
flexible
|
{
"blob_id": "347d468f15dee8a8219d201251cedffe21352f7c",
"index": 8813,
"step-1": "<mask token>\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(user=user, title='Test task',\n description='Uchet kz test task', deadline=localdate(), executed=False)\n",
"step-4": "from django.contrib.auth.models import User\nfrom django.test import Client\nfrom django.utils.timezone import localdate\nfrom pytest import fixture\nfrom operations.models import ToDoList\n\n\n@fixture\ndef user(db):\n return User.objects.create(username='test', email=\n '[email protected]', password='test')\n\n\n@fixture\ndef authenticated_author_client(user, client: Client) ->Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(user=user, title='Test task',\n description='Uchet kz test task', deadline=localdate(), executed=False)\n",
"step-5": "from django.contrib.auth.models import User\nfrom django.test import Client\nfrom django.utils.timezone import localdate\nfrom pytest import fixture\n\nfrom operations.models import ToDoList\n\n\n@fixture\ndef user(db):\n return User.objects.create(\n username='test', email='[email protected]',\n password='test',\n )\n\n\n@fixture\ndef authenticated_author_client(\n user, client: Client\n) -> Client:\n token = Token.objects.get_or_create(user=user)[0].key\n client.defaults['HTTP_AUTHORIZATION'] = f'Token {token}'\n print(client)\n return client\n\n\n@fixture\ndef todo(db, user):\n return ToDoList.objects.create(\n user=user,\n title='Test task',\n description='Uchet kz test task',\n deadline=localdate(),\n executed=False\n )\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from typing import List
class Solution:
def asteroidCollision(self, asteroids: List[int]) ->List[int]:
output = []
index = 0
for i in asteroids:
if len(output) == 0:
index = 0
if index == 0:
output.append(i)
index += 1
continue
elif output[-1] < 0 and i >= 0:
output.append(i)
elif output[-1] >= 0 and i >= 0:
output.append(i)
else:
append = True
while True:
if output[-1] < 0:
break
elif abs(output[-1]) == abs(i):
del output[-1]
append = False
break
elif abs(output[-1]) < abs(i):
del output[-1]
else:
append = False
break
if len(output) == 0:
break
if append:
output.append(i)
return output
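# Quick sanity check for the stack-style collision logic above
# (a sketch added here, not part of the original submission):
if __name__ == '__main__':
    s = Solution()
    print(s.asteroidCollision([5, 10, -5]))  # -> [5, 10]; 10 destroys -5
    print(s.asteroidCollision([8, -8]))      # -> []; equal sizes destroy each other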
|
normal
|
{
"blob_id": "fef4749ce7b8668a5a138aa1245010866a85c853",
"index": 2485,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def asteroidCollision(self, asteroids: List[int]) ->List[int]:\n output = []\n index = 0\n for i in asteroids:\n if len(output) == 0:\n index = 0\n if index == 0:\n output.append(i)\n index += 1\n continue\n elif output[-1] < 0 and i >= 0:\n output.append(i)\n elif output[-1] >= 0 and i >= 0:\n output.append(i)\n else:\n append = True\n while True:\n if output[-1] < 0:\n break\n elif abs(output[-1]) == abs(i):\n del output[-1]\n append = False\n break\n elif abs(output[-1]) < abs(i):\n del output[-1]\n else:\n append = False\n break\n if len(output) == 0:\n break\n if append:\n output.append(i)\n return output\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class s3Obj:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class s3Obj:
<|reserved_special_token_0|>
def getACL(self, client_s3):
"""
get ACL info and update the object
"""
try:
response = client_s3.get_object_acl(Bucket=self.bucket_name,
Key=self.name)
for permission in response['Grants']:
self.acl.append(permission['Permission'])
except botocore.exceptions.ClientError as e:
raise
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class s3Obj:
def __init__(self, name, bucket_name, size, last_modified, storage_class):
self.name = name
self.size = size
self.last_modified = last_modified
self.storage_class = storage_class
self.bucket_name = bucket_name
self.acl = []
def getACL(self, client_s3):
"""
get ACL info and update the object
"""
try:
response = client_s3.get_object_acl(Bucket=self.bucket_name,
Key=self.name)
for permission in response['Grants']:
self.acl.append(permission['Permission'])
except botocore.exceptions.ClientError as e:
raise
<|reserved_special_token_1|>
import botocore
class s3Obj:
def __init__(self, name, bucket_name, size, last_modified, storage_class):
self.name = name
self.size = size
self.last_modified = last_modified
self.storage_class = storage_class
self.bucket_name = bucket_name
self.acl = []
def getACL(self, client_s3):
"""
get ACL info and update the object
"""
try:
response = client_s3.get_object_acl(Bucket=self.bucket_name,
Key=self.name)
for permission in response['Grants']:
self.acl.append(permission['Permission'])
except botocore.exceptions.ClientError as e:
raise
<|reserved_special_token_1|>
import botocore
class s3Obj:
def __init__(self, name, bucket_name, size, last_modified, storage_class):
self.name = name
self.size = size
self.last_modified = last_modified
self.storage_class = storage_class
self.bucket_name = bucket_name
self.acl = []
def getACL(self, client_s3):
'''
get ACL info and update the object
'''
try:
response = client_s3.get_object_acl(Bucket=self.bucket_name, Key=self.name)
for permission in response['Grants']:
self.acl.append(permission['Permission'])
except botocore.exceptions.ClientError as e:
raise
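# Usage sketch (an addition, not from the original class), assuming boto3 is
# installed and the configured credentials can call s3:GetObjectAcl; the
# bucket and key names below are made up:
# import boto3
# obj = s3Obj('report.csv', 'my-bucket', 2048, '2021-01-01', 'STANDARD')
# obj.getACL(boto3.client('s3'))
# print(obj.acl)  # e.g. ['FULL_CONTROL']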
|
flexible
|
{
"blob_id": "b3f376f4aec81cae853f996a74062e32bb4a8fa3",
"index": 2569,
"step-1": "<mask token>\n\n\nclass s3Obj:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass s3Obj:\n <mask token>\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-3": "<mask token>\n\n\nclass s3Obj:\n\n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-4": "import botocore\n\n\nclass s3Obj:\n\n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n \"\"\"\n get ACL info and update the object\n \"\"\"\n try:\n response = client_s3.get_object_acl(Bucket=self.bucket_name,\n Key=self.name)\n for permission in response['Grants']:\n self.acl.append(permission['Permission'])\n except botocore.exceptions.ClientError as e:\n raise\n",
"step-5": "import botocore\n\nclass s3Obj:\n \n def __init__(self, name, bucket_name, size, last_modified, storage_class):\n \n self.name = name\n self.size = size\n self.last_modified = last_modified\n self.storage_class = storage_class\n self.bucket_name = bucket_name\n self.acl = []\n\n def getACL(self, client_s3):\n '''\n get ACL info and update the object\n '''\n \n try:\n \n response = client_s3.get_object_acl(Bucket=self.bucket_name, Key=self.name)\n \n for permission in response['Grants']:\n \n self.acl.append(permission['Permission'])\n \n except botocore.exceptions.ClientError as e:\n \n raise",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import socket
# Packet Sniffing
# It's All Binary
# Usage: python basic_sniffer.py
# create the sniffer raw socket object
sniffer = socket.socket(socket.AF_INET,socket.SOCK_RAW, socket.IPPROTO_ICMP)
# bind to all local interfaces
sniffer.bind(('0.0.0.0',0))
# make sure that the IP header is included
sniffer.setsockopt(socket.IPPROTO_IP,socket.IP_HDRINCL,1)
print 'sniffer is listening for incoming connections'
# get a single packet
print sniffer.recvfrom(65535)
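# Note (added): raw sockets require elevated privileges, so a typical run
# would be `sudo python basic_sniffer.py` on Linux; generating traffic with
# `ping 127.0.0.1` from another shell gives the sniffer a packet to print.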
|
normal
|
{
"blob_id": "9f2a8e78aa2e3eab8f74847443dec9083603da39",
"index": 3643,
"step-1": "import socket\n\n# Packet Sniffing\n# It's All Binary\n\n# Usage: python basic_sniffer.py \n\n# create the sniffer raw socket object\nsniffer = socket.socket(socket.AF_INET,socket.SOCK_RAW, socket.IPPROTO_ICMP)\n\n#bind it to localhost\nsniffer.bind(('0.0.0.0',0))\n\n# make sure that the IP header is included\nsniffer.setsockopt(socket.IPPROTO_IP,socket.IP_HDRINCL,1)\n\nprint 'sniffer is listening for incomming connections'\n\n# get a single packet\nprint sniffer.recvfrom(65535)\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
try:
print(dic[55])
except Exception as err:
print('Mensagem: ', err)
<|reserved_special_token_1|>
dic = {}
try:
print(dic[55])
except Exception as err:
print('Mensagem: ', err)
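# A narrower variant (a sketch added here, not from the original): catching
# KeyError instead of the blanket Exception keeps unrelated errors visible.
try:
    print(dic[55])
except KeyError as err:
    print('Missing key:', err)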
|
flexible
|
{
"blob_id": "618aa64c08ebf8d9a0bc9662195ece2bbd485c17",
"index": 1079,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"step-3": "dic = {}\ntry:\n print(dic[55])\nexcept Exception as err:\n print('Mensagem: ', err)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -------------------------------
# --------- Set Methods ---------
# -------------------------------
# difference() returns the values that are in the first set but not in the second set
set1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set2 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set1)
print(set2)
print(set1.difference(set2))
print(set1-set2)
print(set2.difference(set1))
print(set2-set1)
print(set1)
print(set2)
print("*" * 40)
# difference_update() removes from the first set the values that are also in the second set,
# updating the first set in place with the result
set3 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set4 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set3)
set3.difference_update(set4)
print(set3)
print("*" * 40)
# intersection() returns the values that are in both the first set and the second set
set5 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set6 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set5)
print(set5.intersection(set6))
print(set5)
print("*" * 40)
# intersection_update() keeps only the values that are in both sets,
# updating the first set in place with the result
set7 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set8 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set7)
set7.intersection_update(set8)
print(set7)
print("*" * 40)
# symmetric_difference() returns the values that are in the first set but not in the second set,
# plus the values that are in the second set but not in the first set
set9 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set10 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set9)
print(set9.symmetric_difference(set10))
print(set9^set10)
print(set9)
print("*" * 40)
# symmetric_difference_update() computes the same symmetric difference,
# updating the first set in place with the result
set11 = {1, 2, 3, 4, 5, 6, 7, 8, 9}
set12 = {1, 2, 3, 4, 5, 6, "A", "B"}
print(set11)
set11.symmetric_difference_update(set12)
print(set11)
print("*" * 40)
|
normal
|
{
"blob_id": "faf2f5da92cf45cfedda91955688b3ca1c7c0db9",
"index": 8280,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\n<mask token>\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\n<mask token>\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\n<mask token>\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\n<mask token>\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\n<mask token>\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-3": "set1 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset2 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1 - set2)\nprint(set2.difference(set1))\nprint(set2 - set1)\nprint(set1)\nprint(set2)\nprint('*' * 40)\nset3 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset4 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint('*' * 40)\nset5 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset6 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint('*' * 40)\nset7 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset8 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint('*' * 40)\nset9 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset10 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9 ^ set10)\nprint(set9)\nprint('*' * 40)\nset11 = {1, 2, 3, 4, 5, 6, 7, 8, 9}\nset12 = {1, 2, 3, 4, 5, 6, 'A', 'B'}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint('*' * 40)\n",
"step-4": "# -------------------------------\n# --------- Set Methods ---------\n# -------------------------------\n\n\n# difference() return the values in the first set that not in the second set\nset1 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset2 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set1)\nprint(set2)\nprint(set1.difference(set2))\nprint(set1-set2)\nprint(set2.difference(set1))\nprint(set2-set1)\nprint(set1)\nprint(set2)\n\nprint(\"*\" * 40)\n\n# difference_update() return the values in the first set that not in the second set\n# and update the value for the first set with this values\nset3 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset4 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set3)\nset3.difference_update(set4)\nprint(set3)\nprint(\"*\" * 40)\n\n# intersection() return the values in the first set and in the second set\nset5 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset6 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set5)\nprint(set5.intersection(set6))\nprint(set5)\nprint(\"*\" * 40)\n\n# intersection_update() return the values in the first set and in the second set\n# and update the value for the first set with this values\nset7 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset8 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set7)\nset7.intersection_update(set8)\nprint(set7)\nprint(\"*\" * 40)\n\n# symmetric_difference() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\nset9 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset10 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set9)\nprint(set9.symmetric_difference(set10))\nprint(set9^set10)\nprint(set9)\nprint(\"*\" * 40)\n\n# symmetric_difference_update() return the values in the first set and not in the second set\n# and the values in the second set and not in the first set\n# and update the value for the first set with this values\nset11 ={1, 2, 3, 4, 5, 6, 7, 8 , 9}\nset12 = {1, 2, 3, 4, 5, 6, \"A\", \"B\"}\nprint(set11)\nset11.symmetric_difference_update(set12)\nprint(set11)\nprint(\"*\" * 40)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# http://stackoverflow.com/questions/5276967/python-in-xcode-4
"""tv_write_xyzt2matlab.py: TremVibe Write Accelerometer XYZ and Timestamp to .m file"""
__author__ = "Salvador Aguinaga"
import sys
import MySQLdb
import math
from itertools import groupby
import csv
##########----------##########----------##########----------##########----------
def parse_email_records(row_count, emails):
#print np.shape(results)
'''Puts the filenames in the given iterable into a dictionary where
the key is the first component of the emails and the value is
a list of the records for that email.'''
keyfunc = lambda f: f.split('@', 1)[0]
return dict( (k, list(g)) for k,g in groupby(
sorted((i[0] for i in emails), key=keyfunc), key=keyfunc
) )
#db = MySQLdb.connect('192.168.1.112', 'tremcam', 'trem00xB','neurobit')
#cursor = db.cursor()
#
#query = sys.argv[1]
#cursor.execute(query)
#result = cursor.fetchall()
##########----------##########----------##########----------##########----------
if (len(sys.argv) < 2):
    print "Usage: ql2matlabcsvdat.py email-address"
    print "     : the MySQL server hostname is currently fixed to localhost"
    exit(0)
else:
email = sys.argv[1]
server = 'localhost'
conn = None  # defined before the try so the finally block can test it safely
try:
conn = MySQLdb.Connection(server, 'triaxim', 'tria00xB', 'neurobit')
cursr = conn.cursor()
query = """SELECT accelX, accelY, accelZ, accelTs from TremCamTbl WHERE email =%s"""
cursr.execute(query,email)
conn.commit()
row_count = cursr.rowcount
results = cursr.fetchall()
#print type(results)
#print len(results)
f = open('/tmp/tv_user_accelxyzt.m', 'w')
#for t in results:
#line = ' '.join(str(x) for x in t)
# line = " ".join(map(str, results))
# f.write(line + '\n')
f.write('%% Matlab .m formated file \n')
f.write('%% Accelerometer data for user:%s \n' % (email))
f.write('%% Example how to plot X values of record 6:\n')
f.write('%% plot ((rec6matrix(4,:) - rec6matrix(4,1))/1e+6,rec6matrix(1,:))\n')
rec_no = 0
for record in results:
#f.write('%s %s %s %s\n' % tuple)
#print len(record)
rec_no +=1
f.write('%% record #:%d\n' % rec_no)
#f.write('%% Xvalues\bYvalues\bZvalues\bTimestamp\n')
if len(record) >= 4:
f.write('xVal%d = [%s];\n' % (rec_no,record[0]))
f.write('yVal%d = [%s];\n' % (rec_no,record[1]))
f.write('zVal%d = [%s];\n' % (rec_no,record[2]))
f.write('tVal%d = [%s];\n' % (rec_no,record[3]))
f.write('rec%dmatrix = [xVal%d;yVal%d;zVal%d;tVal%d];\n'%(rec_no,rec_no,rec_no,rec_no,rec_no))
f.close()
# emailDict = parse_email_records(row_count, results)
# with open("/var/www/tremvibe/subject_accel_sensor_dat.csv", "wb") as f:
# csv.writer(f).writerow(['Subject', 'Records'])
# for email in emailDict:
#print email,',',len(emailDict[email])
# csv.writer(f).writerow([email, len(emailDict[email])])
except MySQLdb.Error, e:
print "Error %d: %s" % (e.args[0], e.args[1])
sys.exit(1)
finally:
if conn:
conn.close()
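# Portability note (added): this script targets Python 2 only (print
# statements and the `except MySQLdb.Error, e` syntax) and the MySQLdb
# driver; under Python 3 the mysqlclient package exposes the same module.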
|
normal
|
{
"blob_id": "4f21fb4168ed29b9540d3ca2b8cf6ef746c30831",
"index": 6732,
"step-1": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# http://stackoverflow.com/questions/5276967/python-in-xcode-4\n\n\"\"\"tv_write_xyzt2matlab.py: TremVibe Write Accelerometer XYZ and Timestamp to .m file\"\"\"\n__author__ = \"Salvador Aguinaga\"\n\nimport sys\nimport MySQLdb\nimport math\nfrom itertools import groupby\nimport csv\n\n##########----------##########----------##########----------##########----------\ndef parse_email_records(row_count, emails):\n #print np.shape(results)\n '''Puts the filenames in the given iterable into a dictionary where\n the key is the first component of the emails and the value is\n a list of the records for that email.'''\n keyfunc = lambda f: f.split('@', 1)[0]\n return dict( (k, list(g)) for k,g in groupby(\n sorted((i[0] for i in emails), key=keyfunc), key=keyfunc\n ) )\n#db = MySQLdb.connect('192.168.1.112', 'tremcam', 'trem00xB','neurobit')\n#cursor = db.cursor()\n#\n#query = sys.argv[1]\n#cursor.execute(query)\n#result = cursor.fetchall()\n\n##########----------##########----------##########----------##########----------\nif (len(sys.argv) < 1):\n print \"Usage: ql2matlabcsvdat.py hostname email-address\" #`\"sql query`\"\"\n print \" : hostname = cloud server hostname running the mysql service\"\n exit(0)\nelse:\n email = sys.argv[1]\n\nserver = 'localhost'\n\ntry:\n conn = MySQLdb.Connection(server, 'triaxim', 'tria00xB', 'neurobit')\n cursr = conn.cursor()\n query = \"\"\"SELECT accelX, accelY, accelZ, accelTs from TremCamTbl WHERE email =%s\"\"\"\n cursr.execute(query,email)\n conn.commit()\n row_count = cursr.rowcount\n results = cursr.fetchall()\n #print type(results)\n #print len(results)\n f = open('/tmp/tv_user_accelxyzt.m', 'w')\n #for t in results:\n #line = ' '.join(str(x) for x in t)\n # line = \" \".join(map(str, results))\n # f.write(line + '\\n')\n f.write('%% Matlab .m formated file \\n')\n f.write('%% Accelerometer data for user:%s \\n' % (email))\n f.write('%% Example how to plot X values of record 6:\\n')\n f.write('%% plot ((rec6matrix(4,:) - rec6matrix(4,1))/1e+6,rec6matrix(1,:))\\n')\n rec_no = 0\n for record in results:\n #f.write('%s %s %s %s\\n' % tuple)\n #print len(record)\n rec_no +=1\n f.write('%% record #:%d\\n' % rec_no)\n #f.write('%% Xvalues\\bYvalues\\bZvalues\\bTimestamp\\n')\n if len(record) >= 4:\n f.write('xVal%d = [%s];\\n' % (rec_no,record[0]))\n f.write('yVal%d = [%s];\\n' % (rec_no,record[1]))\n f.write('zVal%d = [%s];\\n' % (rec_no,record[2]))\n f.write('tVal%d = [%s];\\n' % (rec_no,record[3]))\n f.write('rec%dmatrix = [xVal%d;yVal%d;zVal%d;tVal%d];\\n'%(rec_no,rec_no,rec_no,rec_no,rec_no))\n \n f.close()\n# emailDict = parse_email_records(row_count, results)\n# with open(\"/var/www/tremvibe/subject_accel_sensor_dat.csv\", \"wb\") as f:\n # csv.writer(f).writerow(['Subject', 'Records'])\n# for email in emailDict:\n #print email,',',len(emailDict[email])\n# csv.writer(f).writerow([email, len(emailDict[email])])\n \nexcept MySQLdb.Error, e:\n print \"Error %d: %s\" % (e.args[0], e.args[1])\n sys.exit(1)\n\nfinally:\n \n if conn:\n conn.close()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/python
import sys
# SGR format: the reset code (0) comes first; a trailing 0 would undo the colour
BLACK = '\033[0;30m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
YELLOW = '\033[0;33m'
BLUE = '\033[0;34m'
PINK = '\033[0;35m'
CBLUE = '\033[0;36m'
WHITE = '\033[0;37m'
def colorPrint(color, str):
    print(color + str + '\033[0m')
def main():
if sys.argv.__len__() < 2:
print('Wrong usage, exit')
return
colorPrint(YELLOW, sys.argv[1])
if __name__ == '__main__':
main()
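# Usage sketch (added): assuming the file is saved as colorprint.py,
# `python colorprint.py "hello"` prints "hello" in yellow, and running it
# with no argument prints the usage hint.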
|
normal
|
{
"blob_id": "a49c00dab8d445ce0b08fd31a4a41d6c8976d662",
"index": 2263,
"step-1": "<mask token>\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nBLACK = '\\x1b[30;0m'\nRED = '\\x1b[31;0m'\nGREEN = '\\x1b[32;0m'\nYELLOW = '\\x1b[33;0m'\nBLUE = '\\x1b[34;0m'\nPINK = '\\x1b[35;0m'\nCBLUE = '\\x1b[36;0m'\nWHITE = '\\x1b[37;0m'\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nBLACK = '\\x1b[30;0m'\nRED = '\\x1b[31;0m'\nGREEN = '\\x1b[32;0m'\nYELLOW = '\\x1b[33;0m'\nBLUE = '\\x1b[34;0m'\nPINK = '\\x1b[35;0m'\nCBLUE = '\\x1b[36;0m'\nWHITE = '\\x1b[37;0m'\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\nimport sys\n\nBLACK = '\\033[30;0m'\nRED = '\\033[31;0m'\nGREEN = '\\033[32;0m'\nYELLOW = '\\033[33;0m'\nBLUE = '\\033[34;0m'\nPINK = '\\033[35;0m'\nCBLUE = '\\033[36;0m'\nWHITE = '\\033[37;0m'\n\ndef colorPrint(color, str):\n print(color + str + '\\033[0m');\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
if __name__ == '__main__':
pytest.main(['test_case.py', '-s', '-v'])
<|reserved_special_token_1|>
import pytest
from frame_project.实战2.main_page import MainPage
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
if __name__ == '__main__':
pytest.main(['test_case.py', '-s', '-v'])
<|reserved_special_token_1|>
#encoding=utf-8
import pytest
from frame_project.实战2.main_page import MainPage
class TestMian:
def test_mian(self):
MainPage().goto_marketpage().goto_search().search()
if __name__ == '__main__':
pytest.main(['test_case.py','-s','-v'])
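# Sketch of the page-object chaining this test relies on (an assumed shape,
# not taken from the real main_page module):
# class MainPage:
#     def goto_marketpage(self):
#         ...  # navigate to the market page
#         return MarketPage()  # each step hands back the next page object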
|
flexible
|
{
"blob_id": "e1751cc6f76f56e62cd02d61db65f1c27a4ff1b9",
"index": 7351,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-4": "import pytest\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\n\nif __name__ == '__main__':\n pytest.main(['test_case.py', '-s', '-v'])\n",
"step-5": "#encoding=utf-8\nimport pytest\n\nfrom frame_project.实战2.main_page import MainPage\n\n\nclass TestMian:\n def test_mian(self):\n MainPage().goto_marketpage().goto_search().search()\n\nif __name__ == '__main__':\n pytest.main(['test_case.py','-s','-v'])\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/settings.ui')
class SettingsWindow(Handy.PreferencesWindow):
<|reserved_special_token_0|>
dark_theme_switch: Gtk.Switch = Gtk.Template.Child()
night_light_switch: Gtk.Switch = Gtk.Template.Child()
lock_row: Handy.ExpanderRow = Gtk.Template.Child()
lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()
lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __on_enable_password(self, *_):
keyring = Keyring.get_default()
keyring.set_password_state(self.lock_row.props.enable_expansion)
if not keyring.has_password():
self._password_widget.set_current_password_visibility(False)
else:
self._password_widget.set_current_password_visibility(True)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/password_widget.ui')
class PasswordWidget(Gtk.Box):
__gtype_name__ = 'PasswordWidget'
__gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,
()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}
delete_password_btn: Gtk.Button = Gtk.Template.Child()
change_password_btn: Gtk.Button = Gtk.Template.Child()
password_entry: Gtk.Entry = Gtk.Template.Child()
confirm_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_box: Gtk.Box = Gtk.Template.Child()
def __init__(self):
super(PasswordWidget, self).__init__()
self.parent = None
def reset_widgets(self):
"""Reset widgets state."""
self.password_entry.set_text('')
self.confirm_password_entry.set_text('')
self.current_password_entry.set_text('')
self.password_entry.get_style_context().remove_class('error')
self.confirm_password_entry.get_style_context().remove_class('error')
self.current_password_entry.get_style_context().remove_class('error')
self.change_password_btn.set_sensitive(False)
def set_current_password_visibility(self, visibilty: bool):
if not visibilty:
self.current_password_box.hide()
self.delete_password_btn.hide()
self.change_password_btn.set_label(_('Save Password'))
else:
self.current_password_box.show()
self.delete_password_btn.show()
self.change_password_btn.set_label(_('Change Password'))
@Gtk.Template.Callback('password_entry_changed')
def __validate_password(self, *_):
keyring = Keyring.get_default()
password = self.password_entry.get_text()
repeat_password = self.confirm_password_entry.get_text()
if not password:
self.password_entry.get_style_context().add_class('error')
valid_password = False
else:
self.password_entry.get_style_context().remove_class('error')
valid_password = True
if not repeat_password or password != repeat_password:
self.confirm_password_entry.get_style_context().add_class('error')
valid_repeat_password = False
else:
self.confirm_password_entry.get_style_context().remove_class(
'error')
valid_repeat_password = True
to_validate = [valid_password, valid_repeat_password]
if keyring.has_password():
old_password = self.current_password_entry.get_text()
if old_password != keyring.get_password():
self.current_password_entry.get_style_context().add_class(
'error')
valid_old_password = False
else:
self.current_password_entry.get_style_context().remove_class(
'error')
valid_old_password = True
to_validate.append(valid_old_password)
self.change_password_btn.set_sensitive(all(to_validate))
@Gtk.Template.Callback('update_password_clicked')
def __save_password(self, *__):
if self.change_password_btn.get_sensitive():
keyring = Keyring.get_default()
password = self.password_entry.get_text()
keyring.set_password(password)
self.reset_widgets()
self.set_current_password_visibility(True)
self.emit('password-updated')
@Gtk.Template.Callback('reset_password_clicked')
def __reset_password(self, *args):
dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)
dialog.props.message_type = Gtk.MessageType.QUESTION
dialog.props.text = _(
'Do you want to remove the authentication password?')
dialog.props.secondary_text = _(
'Authentication password enforces the privacy of your accounts.')
dialog.set_transient_for(self.parent)
response = dialog.run()
if response == Gtk.ResponseType.YES:
Keyring.get_default().remove_password()
self.reset_widgets()
self.set_current_password_visibility(False)
self.emit('password-deleted')
dialog.destroy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/settings.ui')
class SettingsWindow(Handy.PreferencesWindow):
<|reserved_special_token_0|>
dark_theme_switch: Gtk.Switch = Gtk.Template.Child()
night_light_switch: Gtk.Switch = Gtk.Template.Child()
lock_row: Handy.ExpanderRow = Gtk.Template.Child()
lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()
lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _on_lock_row_expanded(self, *_):
keyring = Keyring.get_default()
if keyring.has_password():
keyring.set_password_state(self.lock_row.props.expanded)
self.lock_row_toggle_btn.props.active = False
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __on_enable_password(self, *_):
keyring = Keyring.get_default()
keyring.set_password_state(self.lock_row.props.enable_expansion)
if not keyring.has_password():
self._password_widget.set_current_password_visibility(False)
else:
self._password_widget.set_current_password_visibility(True)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/password_widget.ui')
class PasswordWidget(Gtk.Box):
__gtype_name__ = 'PasswordWidget'
__gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,
()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}
delete_password_btn: Gtk.Button = Gtk.Template.Child()
change_password_btn: Gtk.Button = Gtk.Template.Child()
password_entry: Gtk.Entry = Gtk.Template.Child()
confirm_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_box: Gtk.Box = Gtk.Template.Child()
def __init__(self):
super(PasswordWidget, self).__init__()
self.parent = None
def reset_widgets(self):
"""Reset widgets state."""
self.password_entry.set_text('')
self.confirm_password_entry.set_text('')
self.current_password_entry.set_text('')
self.password_entry.get_style_context().remove_class('error')
self.confirm_password_entry.get_style_context().remove_class('error')
self.current_password_entry.get_style_context().remove_class('error')
self.change_password_btn.set_sensitive(False)
def set_current_password_visibility(self, visibilty: bool):
if not visibilty:
self.current_password_box.hide()
self.delete_password_btn.hide()
self.change_password_btn.set_label(_('Save Password'))
else:
self.current_password_box.show()
self.delete_password_btn.show()
self.change_password_btn.set_label(_('Change Password'))
@Gtk.Template.Callback('password_entry_changed')
def __validate_password(self, *_):
keyring = Keyring.get_default()
password = self.password_entry.get_text()
repeat_password = self.confirm_password_entry.get_text()
if not password:
self.password_entry.get_style_context().add_class('error')
valid_password = False
else:
self.password_entry.get_style_context().remove_class('error')
valid_password = True
if not repeat_password or password != repeat_password:
self.confirm_password_entry.get_style_context().add_class('error')
valid_repeat_password = False
else:
self.confirm_password_entry.get_style_context().remove_class(
'error')
valid_repeat_password = True
to_validate = [valid_password, valid_repeat_password]
if keyring.has_password():
old_password = self.current_password_entry.get_text()
if old_password != keyring.get_password():
self.current_password_entry.get_style_context().add_class(
'error')
valid_old_password = False
else:
self.current_password_entry.get_style_context().remove_class(
'error')
valid_old_password = True
to_validate.append(valid_old_password)
self.change_password_btn.set_sensitive(all(to_validate))
@Gtk.Template.Callback('update_password_clicked')
def __save_password(self, *__):
if self.change_password_btn.get_sensitive():
keyring = Keyring.get_default()
password = self.password_entry.get_text()
keyring.set_password(password)
self.reset_widgets()
self.set_current_password_visibility(True)
self.emit('password-updated')
@Gtk.Template.Callback('reset_password_clicked')
def __reset_password(self, *args):
dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)
dialog.props.message_type = Gtk.MessageType.QUESTION
dialog.props.text = _(
'Do you want to remove the authentication password?')
dialog.props.secondary_text = _(
'Authentication password enforces the privacy of your accounts.')
dialog.set_transient_for(self.parent)
response = dialog.run()
if response == Gtk.ResponseType.YES:
Keyring.get_default().remove_password()
self.reset_widgets()
self.set_current_password_visibility(False)
self.emit('password-deleted')
dialog.destroy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/settings.ui')
class SettingsWindow(Handy.PreferencesWindow):
<|reserved_special_token_0|>
dark_theme_switch: Gtk.Switch = Gtk.Template.Child()
night_light_switch: Gtk.Switch = Gtk.Template.Child()
lock_row: Handy.ExpanderRow = Gtk.Template.Child()
lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()
lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()
def __init__(self):
super(SettingsWindow, self).__init__()
self.__init_widgets()
self.__bind_signals()
<|reserved_special_token_0|>
def _on_lock_row_expanded(self, *_):
keyring = Keyring.get_default()
if keyring.has_password():
keyring.set_password_state(self.lock_row.props.expanded)
self.lock_row_toggle_btn.props.active = False
<|reserved_special_token_0|>
def __bind_signals(self):
settings = Settings.get_default()
self.dark_theme_switch.set_active(settings.dark_theme and not
settings.night_light)
self.night_light_switch.set_active(settings.night_light)
settings.bind('night-light', self.night_light_switch, 'active', Gio
.SettingsBindFlags.DEFAULT)
keyring = Keyring.get_default()
self.lock_row.props.enable_expansion = keyring.has_password()
self.lock_row_toggle_btn = self.lock_row.get_children()[0
].get_children()[3]
self.lock_row.props.enable_expansion = Keyring.get_default(
).is_password_enabled()
self.lock_row.connect('notify::enable-expansion', self.
__on_enable_password)
self.lock_row_toggle_btn.connect('notify::active', self.
__on_lock_switch_toggled)
self.lock_row.connect('notify::expanded', self._on_lock_row_expanded)
keyring.bind_property('can-be-locked', self.lock_timeout_row,
'sensitive', GObject.BindingFlags.DEFAULT | GObject.
BindingFlags.SYNC_CREATE)
self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout
settings.bind('auto-lock-timeout', self.lock_timeout_spinbtn,
'value', Gio.SettingsBindFlags.DEFAULT)
self._password_widget.connect('password-updated', self.
__on_password_updated)
self._password_widget.connect('password-deleted', self.
__on_password_deleted)
def on_night_light_switch(switch: Gtk.Switch, _):
if switch.get_active() and self.dark_theme_switch.get_active():
self.dark_theme_switch.set_active(False)
self.night_light_switch.connect('notify::active', on_night_light_switch
)
def on_dark_theme_switch(switch: Gtk.Switch, _):
if settings.night_light and switch.get_active():
switch.set_state(False)
elif not settings.night_light:
settings.dark_theme = switch.get_active()
self.dark_theme_switch.connect('notify::active', on_dark_theme_switch)
def __on_enable_password(self, *_):
keyring = Keyring.get_default()
keyring.set_password_state(self.lock_row.props.enable_expansion)
if not keyring.has_password():
self._password_widget.set_current_password_visibility(False)
else:
self._password_widget.set_current_password_visibility(True)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/password_widget.ui')
class PasswordWidget(Gtk.Box):
__gtype_name__ = 'PasswordWidget'
__gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,
()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}
delete_password_btn: Gtk.Button = Gtk.Template.Child()
change_password_btn: Gtk.Button = Gtk.Template.Child()
password_entry: Gtk.Entry = Gtk.Template.Child()
confirm_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_box: Gtk.Box = Gtk.Template.Child()
def __init__(self):
super(PasswordWidget, self).__init__()
self.parent = None
def reset_widgets(self):
"""Reset widgets state."""
self.password_entry.set_text('')
self.confirm_password_entry.set_text('')
self.current_password_entry.set_text('')
self.password_entry.get_style_context().remove_class('error')
self.confirm_password_entry.get_style_context().remove_class('error')
self.current_password_entry.get_style_context().remove_class('error')
self.change_password_btn.set_sensitive(False)
def set_current_password_visibility(self, visibilty: bool):
if not visibilty:
self.current_password_box.hide()
self.delete_password_btn.hide()
self.change_password_btn.set_label(_('Save Password'))
else:
self.current_password_box.show()
self.delete_password_btn.show()
self.change_password_btn.set_label(_('Change Password'))
@Gtk.Template.Callback('password_entry_changed')
def __validate_password(self, *_):
keyring = Keyring.get_default()
password = self.password_entry.get_text()
repeat_password = self.confirm_password_entry.get_text()
if not password:
self.password_entry.get_style_context().add_class('error')
valid_password = False
else:
self.password_entry.get_style_context().remove_class('error')
valid_password = True
if not repeat_password or password != repeat_password:
self.confirm_password_entry.get_style_context().add_class('error')
valid_repeat_password = False
else:
self.confirm_password_entry.get_style_context().remove_class(
'error')
valid_repeat_password = True
to_validate = [valid_password, valid_repeat_password]
if keyring.has_password():
old_password = self.current_password_entry.get_text()
if old_password != keyring.get_password():
self.current_password_entry.get_style_context().add_class(
'error')
valid_old_password = False
else:
self.current_password_entry.get_style_context().remove_class(
'error')
valid_old_password = True
to_validate.append(valid_old_password)
self.change_password_btn.set_sensitive(all(to_validate))
@Gtk.Template.Callback('update_password_clicked')
def __save_password(self, *__):
if self.change_password_btn.get_sensitive():
keyring = Keyring.get_default()
password = self.password_entry.get_text()
keyring.set_password(password)
self.reset_widgets()
self.set_current_password_visibility(True)
self.emit('password-updated')
@Gtk.Template.Callback('reset_password_clicked')
def __reset_password(self, *args):
dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)
dialog.props.message_type = Gtk.MessageType.QUESTION
dialog.props.text = _(
'Do you want to remove the authentication password?')
dialog.props.secondary_text = _(
'Authentication password enforces the privacy of your accounts.')
dialog.set_transient_for(self.parent)
response = dialog.run()
if response == Gtk.ResponseType.YES:
Keyring.get_default().remove_password()
self.reset_widgets()
self.set_current_password_visibility(False)
self.emit('password-deleted')
dialog.destroy()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = ['SettingsWindow']
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/settings.ui')
class SettingsWindow(Handy.PreferencesWindow):
__gtype_name__ = 'SettingsWindow'
dark_theme_switch: Gtk.Switch = Gtk.Template.Child()
night_light_switch: Gtk.Switch = Gtk.Template.Child()
lock_row: Handy.ExpanderRow = Gtk.Template.Child()
lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()
lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()
def __init__(self):
super(SettingsWindow, self).__init__()
self.__init_widgets()
self.__bind_signals()
def __init_widgets(self):
self._password_widget = PasswordWidget()
self._password_widget.parent = self
self.lock_row.add(self._password_widget)
def _on_lock_row_expanded(self, *_):
keyring = Keyring.get_default()
if keyring.has_password():
keyring.set_password_state(self.lock_row.props.expanded)
self.lock_row_toggle_btn.props.active = False
def __on_lock_switch_toggled(self, toggle_btn: Gtk.ToggleButton, *_):
toggled = toggle_btn.props.active
expansion_enabled = self.lock_row.props.enable_expansion
if not Keyring.get_default().has_password(
) and not toggled and expansion_enabled:
self.lock_row.props.enable_expansion = False
def __bind_signals(self):
settings = Settings.get_default()
self.dark_theme_switch.set_active(settings.dark_theme and not
settings.night_light)
self.night_light_switch.set_active(settings.night_light)
settings.bind('night-light', self.night_light_switch, 'active', Gio
.SettingsBindFlags.DEFAULT)
keyring = Keyring.get_default()
self.lock_row.props.enable_expansion = keyring.has_password()
self.lock_row_toggle_btn = self.lock_row.get_children()[0
].get_children()[3]
self.lock_row.props.enable_expansion = Keyring.get_default(
).is_password_enabled()
self.lock_row.connect('notify::enable-expansion', self.
__on_enable_password)
self.lock_row_toggle_btn.connect('notify::active', self.
__on_lock_switch_toggled)
self.lock_row.connect('notify::expanded', self._on_lock_row_expanded)
keyring.bind_property('can-be-locked', self.lock_timeout_row,
'sensitive', GObject.BindingFlags.DEFAULT | GObject.
BindingFlags.SYNC_CREATE)
self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout
settings.bind('auto-lock-timeout', self.lock_timeout_spinbtn,
'value', Gio.SettingsBindFlags.DEFAULT)
self._password_widget.connect('password-updated', self.
__on_password_updated)
self._password_widget.connect('password-deleted', self.
__on_password_deleted)
def on_night_light_switch(switch: Gtk.Switch, _):
if switch.get_active() and self.dark_theme_switch.get_active():
self.dark_theme_switch.set_active(False)
self.night_light_switch.connect('notify::active', on_night_light_switch
)
def on_dark_theme_switch(switch: Gtk.Switch, _):
if settings.night_light and switch.get_active():
switch.set_state(False)
elif not settings.night_light:
settings.dark_theme = switch.get_active()
self.dark_theme_switch.connect('notify::active', on_dark_theme_switch)
def __on_enable_password(self, *_):
keyring = Keyring.get_default()
keyring.set_password_state(self.lock_row.props.enable_expansion)
if not keyring.has_password():
self._password_widget.set_current_password_visibility(False)
else:
self._password_widget.set_current_password_visibility(True)
def __on_password_updated(self, *_):
self.lock_row_toggle_btn.props.active = False
def __on_password_deleted(self, *__):
self.lock_row.set_enable_expansion(False)
self.lock_row_toggle_btn.props.active = False
@Gtk.Template(resource_path=
'/com/github/bilelmoussaoui/Authenticator/password_widget.ui')
class PasswordWidget(Gtk.Box):
__gtype_name__ = 'PasswordWidget'
__gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,
()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}
delete_password_btn: Gtk.Button = Gtk.Template.Child()
change_password_btn: Gtk.Button = Gtk.Template.Child()
password_entry: Gtk.Entry = Gtk.Template.Child()
confirm_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_box: Gtk.Box = Gtk.Template.Child()
def __init__(self):
super(PasswordWidget, self).__init__()
self.parent = None
def reset_widgets(self):
"""Reset widgets state."""
self.password_entry.set_text('')
self.confirm_password_entry.set_text('')
self.current_password_entry.set_text('')
self.password_entry.get_style_context().remove_class('error')
self.confirm_password_entry.get_style_context().remove_class('error')
self.current_password_entry.get_style_context().remove_class('error')
self.change_password_btn.set_sensitive(False)
def set_current_password_visibility(self, visibilty: bool):
if not visibilty:
self.current_password_box.hide()
self.delete_password_btn.hide()
self.change_password_btn.set_label(_('Save Password'))
else:
self.current_password_box.show()
self.delete_password_btn.show()
self.change_password_btn.set_label(_('Change Password'))
@Gtk.Template.Callback('password_entry_changed')
def __validate_password(self, *_):
keyring = Keyring.get_default()
password = self.password_entry.get_text()
repeat_password = self.confirm_password_entry.get_text()
if not password:
self.password_entry.get_style_context().add_class('error')
valid_password = False
else:
self.password_entry.get_style_context().remove_class('error')
valid_password = True
if not repeat_password or password != repeat_password:
self.confirm_password_entry.get_style_context().add_class('error')
valid_repeat_password = False
else:
self.confirm_password_entry.get_style_context().remove_class(
'error')
valid_repeat_password = True
to_validate = [valid_password, valid_repeat_password]
if keyring.has_password():
old_password = self.current_password_entry.get_text()
if old_password != keyring.get_password():
self.current_password_entry.get_style_context().add_class(
'error')
valid_old_password = False
else:
self.current_password_entry.get_style_context().remove_class(
'error')
valid_old_password = True
to_validate.append(valid_old_password)
self.change_password_btn.set_sensitive(all(to_validate))
@Gtk.Template.Callback('update_password_clicked')
def __save_password(self, *__):
if self.change_password_btn.get_sensitive():
keyring = Keyring.get_default()
password = self.password_entry.get_text()
keyring.set_password(password)
self.reset_widgets()
self.set_current_password_visibility(True)
self.emit('password-updated')
@Gtk.Template.Callback('reset_password_clicked')
def __reset_password(self, *args):
dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)
dialog.props.message_type = Gtk.MessageType.QUESTION
dialog.props.text = _(
'Do you want to remove the authentication password?')
dialog.props.secondary_text = _(
'Authentication password enforces the privacy of your accounts.')
dialog.set_transient_for(self.parent)
response = dialog.run()
if response == Gtk.ResponseType.YES:
Keyring.get_default().remove_password()
self.reset_widgets()
self.set_current_password_visibility(False)
self.emit('password-deleted')
dialog.destroy()
<|reserved_special_token_1|>
"""
Copyright © 2017 Bilal Elmoussaoui <[email protected]>
This file is part of Authenticator.
Authenticator is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Authenticator is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Authenticator. If not, see <http://www.gnu.org/licenses/>.
"""
from gettext import gettext as _
from gi.repository import Gio, Gtk, GObject, Handy
from Authenticator.models import Settings, Keyring
__all__ = ['SettingsWindow']
@Gtk.Template(resource_path='/com/github/bilelmoussaoui/Authenticator/settings.ui')
class SettingsWindow(Handy.PreferencesWindow):
__gtype_name__ = 'SettingsWindow'
dark_theme_switch: Gtk.Switch = Gtk.Template.Child()
night_light_switch: Gtk.Switch = Gtk.Template.Child()
lock_row: Handy.ExpanderRow = Gtk.Template.Child()
lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()
lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()
def __init__(self):
super(SettingsWindow, self).__init__()
self.__init_widgets()
self.__bind_signals()
def __init_widgets(self):
self._password_widget = PasswordWidget()
self._password_widget.parent = self
self.lock_row.add(self._password_widget)
def _on_lock_row_expanded(self, *_):
keyring = Keyring.get_default()
if keyring.has_password():
keyring.set_password_state(self.lock_row.props.expanded)
self.lock_row_toggle_btn.props.active = False
def __on_lock_switch_toggled(self, toggle_btn: Gtk.ToggleButton, *_):
toggled = toggle_btn.props.active
expansion_enabled = self.lock_row.props.enable_expansion
if not Keyring.get_default().has_password() and not toggled and expansion_enabled:
self.lock_row.props.enable_expansion = False
def __bind_signals(self):
settings = Settings.get_default()
self.dark_theme_switch.set_active(settings.dark_theme and not settings.night_light)
self.night_light_switch.set_active(settings.night_light)
settings.bind("night-light", self.night_light_switch,
"active", Gio.SettingsBindFlags.DEFAULT)
keyring = Keyring.get_default()
# Hackish solution to get the expander from HdyExpanderRow
self.lock_row.props.enable_expansion = keyring.has_password()
self.lock_row_toggle_btn = self.lock_row.get_children()[0].get_children()[3]
self.lock_row.props.enable_expansion = Keyring.get_default().is_password_enabled()
self.lock_row.connect("notify::enable-expansion", self.__on_enable_password)
self.lock_row_toggle_btn.connect("notify::active", self.__on_lock_switch_toggled)
self.lock_row.connect("notify::expanded", self._on_lock_row_expanded)
keyring.bind_property("can-be-locked", self.lock_timeout_row, "sensitive",
GObject.BindingFlags.DEFAULT | GObject.BindingFlags.SYNC_CREATE)
self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout
settings.bind("auto-lock-timeout", self.lock_timeout_spinbtn, "value",
Gio.SettingsBindFlags.DEFAULT)
self._password_widget.connect("password-updated", self.__on_password_updated)
self._password_widget.connect("password-deleted", self.__on_password_deleted)
def on_night_light_switch(switch: Gtk.Switch, _):
# Set the application to use Light theme
if switch.get_active() and self.dark_theme_switch.get_active():
self.dark_theme_switch.set_active(False)
self.night_light_switch.connect("notify::active", on_night_light_switch)
def on_dark_theme_switch(switch: Gtk.Switch, _):
# Set the application to use Light theme
if settings.night_light and switch.get_active():
switch.set_state(False)
elif not settings.night_light:
settings.dark_theme = switch.get_active()
self.dark_theme_switch.connect("notify::active", on_dark_theme_switch)
def __on_enable_password(self, *_):
keyring = Keyring.get_default()
keyring.set_password_state(self.lock_row.props.enable_expansion)
if not keyring.has_password():
self._password_widget.set_current_password_visibility(False)
else:
self._password_widget.set_current_password_visibility(True)
def __on_password_updated(self, *_):
self.lock_row_toggle_btn.props.active = False
def __on_password_deleted(self, *__):
# self.notification.send(_("The authentication password was deleted."))
self.lock_row.set_enable_expansion(False)
self.lock_row_toggle_btn.props.active = False
@Gtk.Template(resource_path='/com/github/bilelmoussaoui/Authenticator/password_widget.ui')
class PasswordWidget(Gtk.Box):
__gtype_name__ = 'PasswordWidget'
__gsignals__ = {
'password-updated': (
GObject.SignalFlags.RUN_LAST,
None,
()
),
'password-deleted': (
GObject.SignalFlags.RUN_LAST,
None,
()
),
}
delete_password_btn: Gtk.Button = Gtk.Template.Child()
change_password_btn: Gtk.Button = Gtk.Template.Child()
password_entry: Gtk.Entry = Gtk.Template.Child()
confirm_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_entry: Gtk.Entry = Gtk.Template.Child()
current_password_box: Gtk.Box = Gtk.Template.Child()
def __init__(self):
super(PasswordWidget, self).__init__()
self.parent = None
def reset_widgets(self):
"""Reset widgets state."""
self.password_entry.set_text("")
self.confirm_password_entry.set_text("")
self.current_password_entry.set_text("")
self.password_entry.get_style_context().remove_class("error")
self.confirm_password_entry.get_style_context().remove_class("error")
self.current_password_entry.get_style_context().remove_class("error")
self.change_password_btn.set_sensitive(False)
def set_current_password_visibility(self, visibilty: bool):
if not visibilty:
self.current_password_box.hide()
self.delete_password_btn.hide()
self.change_password_btn.set_label(_("Save Password"))
else:
self.current_password_box.show()
self.delete_password_btn.show()
self.change_password_btn.set_label(_("Change Password"))
@Gtk.Template.Callback('password_entry_changed')
def __validate_password(self, *_):
keyring = Keyring.get_default()
password = self.password_entry.get_text()
repeat_password = self.confirm_password_entry.get_text()
if not password:
self.password_entry.get_style_context().add_class("error")
valid_password = False
else:
self.password_entry.get_style_context().remove_class("error")
valid_password = True
if not repeat_password or password != repeat_password:
self.confirm_password_entry.get_style_context().add_class("error")
valid_repeat_password = False
else:
self.confirm_password_entry.get_style_context().remove_class("error")
valid_repeat_password = True
to_validate = [valid_password, valid_repeat_password]
if keyring.has_password():
old_password = self.current_password_entry.get_text()
if old_password != keyring.get_password():
self.current_password_entry.get_style_context().add_class("error")
valid_old_password = False
else:
self.current_password_entry.get_style_context().remove_class("error")
valid_old_password = True
to_validate.append(valid_old_password)
self.change_password_btn.set_sensitive(all(to_validate))
@Gtk.Template.Callback('update_password_clicked')
def __save_password(self, *__):
if self.change_password_btn.get_sensitive():
keyring = Keyring.get_default()
password = self.password_entry.get_text()
keyring.set_password(password)
self.reset_widgets()
self.set_current_password_visibility(True)
self.emit("password-updated")
@Gtk.Template.Callback('reset_password_clicked')
def __reset_password(self, *args):
dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)
dialog.props.message_type = Gtk.MessageType.QUESTION
dialog.props.text = _("Do you want to remove the authentication password?")
dialog.props.secondary_text = _("Authentication password enforces the privacy of your accounts.")
dialog.set_transient_for(self.parent)
response = dialog.run()
if response == Gtk.ResponseType.YES:
Keyring.get_default().remove_password()
self.reset_widgets()
self.set_current_password_visibility(False)
self.emit("password-deleted")
dialog.destroy()
|
flexible
|
{
"blob_id": "a7d8efe3231b3e3b9bfc5ef64a936816e8b67d6c",
"index": 3127,
"step-1": "<mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/settings.ui')\nclass SettingsWindow(Handy.PreferencesWindow):\n <mask token>\n dark_theme_switch: Gtk.Switch = Gtk.Template.Child()\n night_light_switch: Gtk.Switch = Gtk.Template.Child()\n lock_row: Handy.ExpanderRow = Gtk.Template.Child()\n lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()\n lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __on_enable_password(self, *_):\n keyring = Keyring.get_default()\n keyring.set_password_state(self.lock_row.props.enable_expansion)\n if not keyring.has_password():\n self._password_widget.set_current_password_visibility(False)\n else:\n self._password_widget.set_current_password_visibility(True)\n <mask token>\n <mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/password_widget.ui')\nclass PasswordWidget(Gtk.Box):\n __gtype_name__ = 'PasswordWidget'\n __gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,\n ()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}\n delete_password_btn: Gtk.Button = Gtk.Template.Child()\n change_password_btn: Gtk.Button = Gtk.Template.Child()\n password_entry: Gtk.Entry = Gtk.Template.Child()\n confirm_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_box: Gtk.Box = Gtk.Template.Child()\n\n def __init__(self):\n super(PasswordWidget, self).__init__()\n self.parent = None\n\n def reset_widgets(self):\n \"\"\"Reset widgets state.\"\"\"\n self.password_entry.set_text('')\n self.confirm_password_entry.set_text('')\n self.current_password_entry.set_text('')\n self.password_entry.get_style_context().remove_class('error')\n self.confirm_password_entry.get_style_context().remove_class('error')\n self.current_password_entry.get_style_context().remove_class('error')\n self.change_password_btn.set_sensitive(False)\n\n def set_current_password_visibility(self, visibilty: bool):\n if not visibilty:\n self.current_password_box.hide()\n self.delete_password_btn.hide()\n self.change_password_btn.set_label(_('Save Password'))\n else:\n self.current_password_box.show()\n self.delete_password_btn.show()\n self.change_password_btn.set_label(_('Change Password'))\n\n @Gtk.Template.Callback('password_entry_changed')\n def __validate_password(self, *_):\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n repeat_password = self.confirm_password_entry.get_text()\n if not password:\n self.password_entry.get_style_context().add_class('error')\n valid_password = False\n else:\n self.password_entry.get_style_context().remove_class('error')\n valid_password = True\n if not repeat_password or password != repeat_password:\n self.confirm_password_entry.get_style_context().add_class('error')\n valid_repeat_password = False\n else:\n self.confirm_password_entry.get_style_context().remove_class(\n 'error')\n valid_repeat_password = True\n to_validate = [valid_password, valid_repeat_password]\n if keyring.has_password():\n old_password = self.current_password_entry.get_text()\n if old_password != keyring.get_password():\n self.current_password_entry.get_style_context().add_class(\n 'error')\n valid_old_password = False\n else:\n self.current_password_entry.get_style_context().remove_class(\n 'error')\n valid_old_password = True\n to_validate.append(valid_old_password)\n 
self.change_password_btn.set_sensitive(all(to_validate))\n\n @Gtk.Template.Callback('update_password_clicked')\n def __save_password(self, *__):\n if self.change_password_btn.get_sensitive():\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n keyring.set_password(password)\n self.reset_widgets()\n self.set_current_password_visibility(True)\n self.emit('password-updated')\n\n @Gtk.Template.Callback('reset_password_clicked')\n def __reset_password(self, *args):\n dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)\n dialog.props.message_type = Gtk.MessageType.QUESTION\n dialog.props.text = _(\n 'Do you want to remove the authentication password?')\n dialog.props.secondary_text = _(\n 'Authentication password enforces the privacy of your accounts.')\n dialog.set_transient_for(self.parent)\n response = dialog.run()\n if response == Gtk.ResponseType.YES:\n Keyring.get_default().remove_password()\n self.reset_widgets()\n self.set_current_password_visibility(False)\n self.emit('password-deleted')\n dialog.destroy()\n",
"step-2": "<mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/settings.ui')\nclass SettingsWindow(Handy.PreferencesWindow):\n <mask token>\n dark_theme_switch: Gtk.Switch = Gtk.Template.Child()\n night_light_switch: Gtk.Switch = Gtk.Template.Child()\n lock_row: Handy.ExpanderRow = Gtk.Template.Child()\n lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()\n lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()\n <mask token>\n <mask token>\n\n def _on_lock_row_expanded(self, *_):\n keyring = Keyring.get_default()\n if keyring.has_password():\n keyring.set_password_state(self.lock_row.props.expanded)\n self.lock_row_toggle_btn.props.active = False\n <mask token>\n <mask token>\n\n def __on_enable_password(self, *_):\n keyring = Keyring.get_default()\n keyring.set_password_state(self.lock_row.props.enable_expansion)\n if not keyring.has_password():\n self._password_widget.set_current_password_visibility(False)\n else:\n self._password_widget.set_current_password_visibility(True)\n <mask token>\n <mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/password_widget.ui')\nclass PasswordWidget(Gtk.Box):\n __gtype_name__ = 'PasswordWidget'\n __gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,\n ()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}\n delete_password_btn: Gtk.Button = Gtk.Template.Child()\n change_password_btn: Gtk.Button = Gtk.Template.Child()\n password_entry: Gtk.Entry = Gtk.Template.Child()\n confirm_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_box: Gtk.Box = Gtk.Template.Child()\n\n def __init__(self):\n super(PasswordWidget, self).__init__()\n self.parent = None\n\n def reset_widgets(self):\n \"\"\"Reset widgets state.\"\"\"\n self.password_entry.set_text('')\n self.confirm_password_entry.set_text('')\n self.current_password_entry.set_text('')\n self.password_entry.get_style_context().remove_class('error')\n self.confirm_password_entry.get_style_context().remove_class('error')\n self.current_password_entry.get_style_context().remove_class('error')\n self.change_password_btn.set_sensitive(False)\n\n def set_current_password_visibility(self, visibilty: bool):\n if not visibilty:\n self.current_password_box.hide()\n self.delete_password_btn.hide()\n self.change_password_btn.set_label(_('Save Password'))\n else:\n self.current_password_box.show()\n self.delete_password_btn.show()\n self.change_password_btn.set_label(_('Change Password'))\n\n @Gtk.Template.Callback('password_entry_changed')\n def __validate_password(self, *_):\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n repeat_password = self.confirm_password_entry.get_text()\n if not password:\n self.password_entry.get_style_context().add_class('error')\n valid_password = False\n else:\n self.password_entry.get_style_context().remove_class('error')\n valid_password = True\n if not repeat_password or password != repeat_password:\n self.confirm_password_entry.get_style_context().add_class('error')\n valid_repeat_password = False\n else:\n self.confirm_password_entry.get_style_context().remove_class(\n 'error')\n valid_repeat_password = True\n to_validate = [valid_password, valid_repeat_password]\n if keyring.has_password():\n old_password = self.current_password_entry.get_text()\n if old_password != keyring.get_password():\n self.current_password_entry.get_style_context().add_class(\n 
'error')\n valid_old_password = False\n else:\n self.current_password_entry.get_style_context().remove_class(\n 'error')\n valid_old_password = True\n to_validate.append(valid_old_password)\n self.change_password_btn.set_sensitive(all(to_validate))\n\n @Gtk.Template.Callback('update_password_clicked')\n def __save_password(self, *__):\n if self.change_password_btn.get_sensitive():\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n keyring.set_password(password)\n self.reset_widgets()\n self.set_current_password_visibility(True)\n self.emit('password-updated')\n\n @Gtk.Template.Callback('reset_password_clicked')\n def __reset_password(self, *args):\n dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)\n dialog.props.message_type = Gtk.MessageType.QUESTION\n dialog.props.text = _(\n 'Do you want to remove the authentication password?')\n dialog.props.secondary_text = _(\n 'Authentication password enforces the privacy of your accounts.')\n dialog.set_transient_for(self.parent)\n response = dialog.run()\n if response == Gtk.ResponseType.YES:\n Keyring.get_default().remove_password()\n self.reset_widgets()\n self.set_current_password_visibility(False)\n self.emit('password-deleted')\n dialog.destroy()\n",
"step-3": "<mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/settings.ui')\nclass SettingsWindow(Handy.PreferencesWindow):\n <mask token>\n dark_theme_switch: Gtk.Switch = Gtk.Template.Child()\n night_light_switch: Gtk.Switch = Gtk.Template.Child()\n lock_row: Handy.ExpanderRow = Gtk.Template.Child()\n lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()\n lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()\n\n def __init__(self):\n super(SettingsWindow, self).__init__()\n self.__init_widgets()\n self.__bind_signals()\n <mask token>\n\n def _on_lock_row_expanded(self, *_):\n keyring = Keyring.get_default()\n if keyring.has_password():\n keyring.set_password_state(self.lock_row.props.expanded)\n self.lock_row_toggle_btn.props.active = False\n <mask token>\n\n def __bind_signals(self):\n settings = Settings.get_default()\n self.dark_theme_switch.set_active(settings.dark_theme and not\n settings.night_light)\n self.night_light_switch.set_active(settings.night_light)\n settings.bind('night-light', self.night_light_switch, 'active', Gio\n .SettingsBindFlags.DEFAULT)\n keyring = Keyring.get_default()\n self.lock_row.props.enable_expansion = keyring.has_password()\n self.lock_row_toggle_btn = self.lock_row.get_children()[0\n ].get_children()[3]\n self.lock_row.props.enable_expansion = Keyring.get_default(\n ).is_password_enabled()\n self.lock_row.connect('notify::enable-expansion', self.\n __on_enable_password)\n self.lock_row_toggle_btn.connect('notify::active', self.\n __on_lock_switch_toggled)\n self.lock_row.connect('notify::expanded', self._on_lock_row_expanded)\n keyring.bind_property('can-be-locked', self.lock_timeout_row,\n 'sensitive', GObject.BindingFlags.DEFAULT | GObject.\n BindingFlags.SYNC_CREATE)\n self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout\n settings.bind('auto-lock-timeout', self.lock_timeout_spinbtn,\n 'value', Gio.SettingsBindFlags.DEFAULT)\n self._password_widget.connect('password-updated', self.\n __on_password_updated)\n self._password_widget.connect('password-deleted', self.\n __on_password_deleted)\n\n def on_night_light_switch(switch: Gtk.Switch, _):\n if switch.get_active() and self.dark_theme_switch.get_active():\n self.dark_theme_switch.set_active(False)\n self.night_light_switch.connect('notify::active', on_night_light_switch\n )\n\n def on_dark_theme_switch(switch: Gtk.Switch, _):\n if settings.night_light and switch.get_active():\n switch.set_state(False)\n elif not settings.night_light:\n settings.dark_theme = switch.get_active()\n self.dark_theme_switch.connect('notify::active', on_dark_theme_switch)\n\n def __on_enable_password(self, *_):\n keyring = Keyring.get_default()\n keyring.set_password_state(self.lock_row.props.enable_expansion)\n if not keyring.has_password():\n self._password_widget.set_current_password_visibility(False)\n else:\n self._password_widget.set_current_password_visibility(True)\n <mask token>\n <mask token>\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/password_widget.ui')\nclass PasswordWidget(Gtk.Box):\n __gtype_name__ = 'PasswordWidget'\n __gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,\n ()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}\n delete_password_btn: Gtk.Button = Gtk.Template.Child()\n change_password_btn: Gtk.Button = Gtk.Template.Child()\n password_entry: Gtk.Entry = Gtk.Template.Child()\n confirm_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_entry: 
Gtk.Entry = Gtk.Template.Child()\n current_password_box: Gtk.Box = Gtk.Template.Child()\n\n def __init__(self):\n super(PasswordWidget, self).__init__()\n self.parent = None\n\n def reset_widgets(self):\n \"\"\"Reset widgets state.\"\"\"\n self.password_entry.set_text('')\n self.confirm_password_entry.set_text('')\n self.current_password_entry.set_text('')\n self.password_entry.get_style_context().remove_class('error')\n self.confirm_password_entry.get_style_context().remove_class('error')\n self.current_password_entry.get_style_context().remove_class('error')\n self.change_password_btn.set_sensitive(False)\n\n def set_current_password_visibility(self, visibilty: bool):\n if not visibilty:\n self.current_password_box.hide()\n self.delete_password_btn.hide()\n self.change_password_btn.set_label(_('Save Password'))\n else:\n self.current_password_box.show()\n self.delete_password_btn.show()\n self.change_password_btn.set_label(_('Change Password'))\n\n @Gtk.Template.Callback('password_entry_changed')\n def __validate_password(self, *_):\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n repeat_password = self.confirm_password_entry.get_text()\n if not password:\n self.password_entry.get_style_context().add_class('error')\n valid_password = False\n else:\n self.password_entry.get_style_context().remove_class('error')\n valid_password = True\n if not repeat_password or password != repeat_password:\n self.confirm_password_entry.get_style_context().add_class('error')\n valid_repeat_password = False\n else:\n self.confirm_password_entry.get_style_context().remove_class(\n 'error')\n valid_repeat_password = True\n to_validate = [valid_password, valid_repeat_password]\n if keyring.has_password():\n old_password = self.current_password_entry.get_text()\n if old_password != keyring.get_password():\n self.current_password_entry.get_style_context().add_class(\n 'error')\n valid_old_password = False\n else:\n self.current_password_entry.get_style_context().remove_class(\n 'error')\n valid_old_password = True\n to_validate.append(valid_old_password)\n self.change_password_btn.set_sensitive(all(to_validate))\n\n @Gtk.Template.Callback('update_password_clicked')\n def __save_password(self, *__):\n if self.change_password_btn.get_sensitive():\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n keyring.set_password(password)\n self.reset_widgets()\n self.set_current_password_visibility(True)\n self.emit('password-updated')\n\n @Gtk.Template.Callback('reset_password_clicked')\n def __reset_password(self, *args):\n dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)\n dialog.props.message_type = Gtk.MessageType.QUESTION\n dialog.props.text = _(\n 'Do you want to remove the authentication password?')\n dialog.props.secondary_text = _(\n 'Authentication password enforces the privacy of your accounts.')\n dialog.set_transient_for(self.parent)\n response = dialog.run()\n if response == Gtk.ResponseType.YES:\n Keyring.get_default().remove_password()\n self.reset_widgets()\n self.set_current_password_visibility(False)\n self.emit('password-deleted')\n dialog.destroy()\n",
"step-4": "<mask token>\n__all__ = ['SettingsWindow']\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/settings.ui')\nclass SettingsWindow(Handy.PreferencesWindow):\n __gtype_name__ = 'SettingsWindow'\n dark_theme_switch: Gtk.Switch = Gtk.Template.Child()\n night_light_switch: Gtk.Switch = Gtk.Template.Child()\n lock_row: Handy.ExpanderRow = Gtk.Template.Child()\n lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()\n lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()\n\n def __init__(self):\n super(SettingsWindow, self).__init__()\n self.__init_widgets()\n self.__bind_signals()\n\n def __init_widgets(self):\n self._password_widget = PasswordWidget()\n self._password_widget.parent = self\n self.lock_row.add(self._password_widget)\n\n def _on_lock_row_expanded(self, *_):\n keyring = Keyring.get_default()\n if keyring.has_password():\n keyring.set_password_state(self.lock_row.props.expanded)\n self.lock_row_toggle_btn.props.active = False\n\n def __on_lock_switch_toggled(self, toggle_btn: Gtk.ToggleButton, *_):\n toggled = toggle_btn.props.active\n expansion_enabled = self.lock_row.props.enable_expansion\n if not Keyring.get_default().has_password(\n ) and not toggled and expansion_enabled:\n self.lock_row.props.enable_expansion = False\n\n def __bind_signals(self):\n settings = Settings.get_default()\n self.dark_theme_switch.set_active(settings.dark_theme and not\n settings.night_light)\n self.night_light_switch.set_active(settings.night_light)\n settings.bind('night-light', self.night_light_switch, 'active', Gio\n .SettingsBindFlags.DEFAULT)\n keyring = Keyring.get_default()\n self.lock_row.props.enable_expansion = keyring.has_password()\n self.lock_row_toggle_btn = self.lock_row.get_children()[0\n ].get_children()[3]\n self.lock_row.props.enable_expansion = Keyring.get_default(\n ).is_password_enabled()\n self.lock_row.connect('notify::enable-expansion', self.\n __on_enable_password)\n self.lock_row_toggle_btn.connect('notify::active', self.\n __on_lock_switch_toggled)\n self.lock_row.connect('notify::expanded', self._on_lock_row_expanded)\n keyring.bind_property('can-be-locked', self.lock_timeout_row,\n 'sensitive', GObject.BindingFlags.DEFAULT | GObject.\n BindingFlags.SYNC_CREATE)\n self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout\n settings.bind('auto-lock-timeout', self.lock_timeout_spinbtn,\n 'value', Gio.SettingsBindFlags.DEFAULT)\n self._password_widget.connect('password-updated', self.\n __on_password_updated)\n self._password_widget.connect('password-deleted', self.\n __on_password_deleted)\n\n def on_night_light_switch(switch: Gtk.Switch, _):\n if switch.get_active() and self.dark_theme_switch.get_active():\n self.dark_theme_switch.set_active(False)\n self.night_light_switch.connect('notify::active', on_night_light_switch\n )\n\n def on_dark_theme_switch(switch: Gtk.Switch, _):\n if settings.night_light and switch.get_active():\n switch.set_state(False)\n elif not settings.night_light:\n settings.dark_theme = switch.get_active()\n self.dark_theme_switch.connect('notify::active', on_dark_theme_switch)\n\n def __on_enable_password(self, *_):\n keyring = Keyring.get_default()\n keyring.set_password_state(self.lock_row.props.enable_expansion)\n if not keyring.has_password():\n self._password_widget.set_current_password_visibility(False)\n else:\n self._password_widget.set_current_password_visibility(True)\n\n def __on_password_updated(self, *_):\n self.lock_row_toggle_btn.props.active = False\n\n def 
__on_password_deleted(self, *__):\n self.lock_row.set_enable_expansion(False)\n self.lock_row_toggle_btn.props.active = False\n\n\[email protected](resource_path=\n '/com/github/bilelmoussaoui/Authenticator/password_widget.ui')\nclass PasswordWidget(Gtk.Box):\n __gtype_name__ = 'PasswordWidget'\n __gsignals__ = {'password-updated': (GObject.SignalFlags.RUN_LAST, None,\n ()), 'password-deleted': (GObject.SignalFlags.RUN_LAST, None, ())}\n delete_password_btn: Gtk.Button = Gtk.Template.Child()\n change_password_btn: Gtk.Button = Gtk.Template.Child()\n password_entry: Gtk.Entry = Gtk.Template.Child()\n confirm_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_box: Gtk.Box = Gtk.Template.Child()\n\n def __init__(self):\n super(PasswordWidget, self).__init__()\n self.parent = None\n\n def reset_widgets(self):\n \"\"\"Reset widgets state.\"\"\"\n self.password_entry.set_text('')\n self.confirm_password_entry.set_text('')\n self.current_password_entry.set_text('')\n self.password_entry.get_style_context().remove_class('error')\n self.confirm_password_entry.get_style_context().remove_class('error')\n self.current_password_entry.get_style_context().remove_class('error')\n self.change_password_btn.set_sensitive(False)\n\n def set_current_password_visibility(self, visibilty: bool):\n if not visibilty:\n self.current_password_box.hide()\n self.delete_password_btn.hide()\n self.change_password_btn.set_label(_('Save Password'))\n else:\n self.current_password_box.show()\n self.delete_password_btn.show()\n self.change_password_btn.set_label(_('Change Password'))\n\n @Gtk.Template.Callback('password_entry_changed')\n def __validate_password(self, *_):\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n repeat_password = self.confirm_password_entry.get_text()\n if not password:\n self.password_entry.get_style_context().add_class('error')\n valid_password = False\n else:\n self.password_entry.get_style_context().remove_class('error')\n valid_password = True\n if not repeat_password or password != repeat_password:\n self.confirm_password_entry.get_style_context().add_class('error')\n valid_repeat_password = False\n else:\n self.confirm_password_entry.get_style_context().remove_class(\n 'error')\n valid_repeat_password = True\n to_validate = [valid_password, valid_repeat_password]\n if keyring.has_password():\n old_password = self.current_password_entry.get_text()\n if old_password != keyring.get_password():\n self.current_password_entry.get_style_context().add_class(\n 'error')\n valid_old_password = False\n else:\n self.current_password_entry.get_style_context().remove_class(\n 'error')\n valid_old_password = True\n to_validate.append(valid_old_password)\n self.change_password_btn.set_sensitive(all(to_validate))\n\n @Gtk.Template.Callback('update_password_clicked')\n def __save_password(self, *__):\n if self.change_password_btn.get_sensitive():\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n keyring.set_password(password)\n self.reset_widgets()\n self.set_current_password_visibility(True)\n self.emit('password-updated')\n\n @Gtk.Template.Callback('reset_password_clicked')\n def __reset_password(self, *args):\n dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)\n dialog.props.message_type = Gtk.MessageType.QUESTION\n dialog.props.text = _(\n 'Do you want to remove the authentication password?')\n dialog.props.secondary_text = _(\n 'Authentication password enforces 
the privacy of your accounts.')\n dialog.set_transient_for(self.parent)\n response = dialog.run()\n if response == Gtk.ResponseType.YES:\n Keyring.get_default().remove_password()\n self.reset_widgets()\n self.set_current_password_visibility(False)\n self.emit('password-deleted')\n dialog.destroy()\n",
"step-5": "\"\"\"\n Copyright © 2017 Bilal Elmoussaoui <[email protected]>\n\n This file is part of Authenticator.\n\n Authenticator is free software: you can redistribute it and/or\n modify it under the terms of the GNU General Public License as published\n by the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n Authenticator is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with Authenticator. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nfrom gettext import gettext as _\nfrom gi.repository import Gio, Gtk, GObject, Handy\nfrom Authenticator.models import Settings, Keyring\n\n__all__ = ['SettingsWindow']\n\n\[email protected](resource_path='/com/github/bilelmoussaoui/Authenticator/settings.ui')\nclass SettingsWindow(Handy.PreferencesWindow):\n __gtype_name__ = 'SettingsWindow'\n\n dark_theme_switch: Gtk.Switch = Gtk.Template.Child()\n night_light_switch: Gtk.Switch = Gtk.Template.Child()\n\n lock_row: Handy.ExpanderRow = Gtk.Template.Child()\n\n lock_timeout_row: Handy.ActionRow = Gtk.Template.Child()\n lock_timeout_spinbtn: Gtk.SpinButton = Gtk.Template.Child()\n\n def __init__(self):\n super(SettingsWindow, self).__init__()\n\n self.__init_widgets()\n self.__bind_signals()\n\n def __init_widgets(self):\n\n self._password_widget = PasswordWidget()\n self._password_widget.parent = self\n self.lock_row.add(self._password_widget)\n\n def _on_lock_row_expanded(self, *_):\n keyring = Keyring.get_default()\n if keyring.has_password():\n keyring.set_password_state(self.lock_row.props.expanded)\n self.lock_row_toggle_btn.props.active = False\n\n def __on_lock_switch_toggled(self, toggle_btn: Gtk.ToggleButton, *_):\n toggled = toggle_btn.props.active\n expansion_enabled = self.lock_row.props.enable_expansion\n if not Keyring.get_default().has_password() and not toggled and expansion_enabled:\n self.lock_row.props.enable_expansion = False\n\n def __bind_signals(self):\n settings = Settings.get_default()\n self.dark_theme_switch.set_active(settings.dark_theme and not settings.night_light)\n\n self.night_light_switch.set_active(settings.night_light)\n settings.bind(\"night-light\", self.night_light_switch,\n \"active\", Gio.SettingsBindFlags.DEFAULT)\n\n keyring = Keyring.get_default()\n # Hackish solution to get the expander from HdyExpanderRow\n self.lock_row.props.enable_expansion = keyring.has_password()\n self.lock_row_toggle_btn = self.lock_row.get_children()[0].get_children()[3]\n\n self.lock_row.props.enable_expansion = Keyring.get_default().is_password_enabled()\n self.lock_row.connect(\"notify::enable-expansion\", self.__on_enable_password)\n self.lock_row_toggle_btn.connect(\"notify::active\", self.__on_lock_switch_toggled)\n self.lock_row.connect(\"notify::expanded\", self._on_lock_row_expanded)\n\n keyring.bind_property(\"can-be-locked\", self.lock_timeout_row, \"sensitive\",\n GObject.BindingFlags.DEFAULT | GObject.BindingFlags.SYNC_CREATE)\n self.lock_timeout_spinbtn.props.value = settings.auto_lock_timeout\n settings.bind(\"auto-lock-timeout\", self.lock_timeout_spinbtn, \"value\",\n Gio.SettingsBindFlags.DEFAULT)\n\n self._password_widget.connect(\"password-updated\", self.__on_password_updated)\n self._password_widget.connect(\"password-deleted\", 
self.__on_password_deleted)\n\n def on_night_light_switch(switch: Gtk.Switch, _):\n # Set the application to use Light theme\n if switch.get_active() and self.dark_theme_switch.get_active():\n\n self.dark_theme_switch.set_active(False)\n\n self.night_light_switch.connect(\"notify::active\", on_night_light_switch)\n\n def on_dark_theme_switch(switch: Gtk.Switch, _):\n # Set the application to use Light theme\n\n if settings.night_light and switch.get_active():\n switch.set_state(False)\n elif not settings.night_light:\n settings.dark_theme = switch.get_active()\n\n self.dark_theme_switch.connect(\"notify::active\", on_dark_theme_switch)\n\n def __on_enable_password(self, *_):\n keyring = Keyring.get_default()\n keyring.set_password_state(self.lock_row.props.enable_expansion)\n if not keyring.has_password():\n self._password_widget.set_current_password_visibility(False)\n else:\n self._password_widget.set_current_password_visibility(True)\n\n def __on_password_updated(self, *_):\n self.lock_row_toggle_btn.props.active = False\n\n def __on_password_deleted(self, *__):\n # self.notification.send(_(\"The authentication password was deleted.\"))\n self.lock_row.set_enable_expansion(False)\n self.lock_row_toggle_btn.props.active = False\n\n\[email protected](resource_path='/com/github/bilelmoussaoui/Authenticator/password_widget.ui')\nclass PasswordWidget(Gtk.Box):\n __gtype_name__ = 'PasswordWidget'\n __gsignals__ = {\n 'password-updated': (\n GObject.SignalFlags.RUN_LAST,\n None,\n ()\n ),\n 'password-deleted': (\n GObject.SignalFlags.RUN_LAST,\n None,\n ()\n ),\n }\n\n delete_password_btn: Gtk.Button = Gtk.Template.Child()\n change_password_btn: Gtk.Button = Gtk.Template.Child()\n\n password_entry: Gtk.Entry = Gtk.Template.Child()\n confirm_password_entry: Gtk.Entry = Gtk.Template.Child()\n current_password_entry: Gtk.Entry = Gtk.Template.Child()\n\n current_password_box: Gtk.Box = Gtk.Template.Child()\n\n def __init__(self):\n super(PasswordWidget, self).__init__()\n self.parent = None\n\n def reset_widgets(self):\n \"\"\"Reset widgets state.\"\"\"\n self.password_entry.set_text(\"\")\n self.confirm_password_entry.set_text(\"\")\n self.current_password_entry.set_text(\"\")\n\n self.password_entry.get_style_context().remove_class(\"error\")\n self.confirm_password_entry.get_style_context().remove_class(\"error\")\n self.current_password_entry.get_style_context().remove_class(\"error\")\n self.change_password_btn.set_sensitive(False)\n\n def set_current_password_visibility(self, visibilty: bool):\n if not visibilty:\n self.current_password_box.hide()\n self.delete_password_btn.hide()\n self.change_password_btn.set_label(_(\"Save Password\"))\n else:\n self.current_password_box.show()\n self.delete_password_btn.show()\n self.change_password_btn.set_label(_(\"Change Password\"))\n\n @Gtk.Template.Callback('password_entry_changed')\n def __validate_password(self, *_):\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n repeat_password = self.confirm_password_entry.get_text()\n if not password:\n self.password_entry.get_style_context().add_class(\"error\")\n valid_password = False\n else:\n self.password_entry.get_style_context().remove_class(\"error\")\n valid_password = True\n\n if not repeat_password or password != repeat_password:\n self.confirm_password_entry.get_style_context().add_class(\"error\")\n valid_repeat_password = False\n else:\n self.confirm_password_entry.get_style_context().remove_class(\"error\")\n valid_repeat_password = True\n to_validate = 
[valid_password, valid_repeat_password]\n\n if keyring.has_password():\n old_password = self.current_password_entry.get_text()\n if old_password != keyring.get_password():\n self.current_password_entry.get_style_context().add_class(\"error\")\n valid_old_password = False\n else:\n self.current_password_entry.get_style_context().remove_class(\"error\")\n valid_old_password = True\n to_validate.append(valid_old_password)\n\n self.change_password_btn.set_sensitive(all(to_validate))\n\n @Gtk.Template.Callback('update_password_clicked')\n def __save_password(self, *__):\n if self.change_password_btn.get_sensitive():\n keyring = Keyring.get_default()\n password = self.password_entry.get_text()\n keyring.set_password(password)\n self.reset_widgets()\n self.set_current_password_visibility(True)\n self.emit(\"password-updated\")\n\n @Gtk.Template.Callback('reset_password_clicked')\n def __reset_password(self, *args):\n dialog = Gtk.MessageDialog(buttons=Gtk.ButtonsType.YES_NO)\n dialog.props.message_type = Gtk.MessageType.QUESTION\n dialog.props.text = _(\"Do you want to remove the authentication password?\")\n dialog.props.secondary_text = _(\"Authentication password enforces the privacy of your accounts.\")\n\n dialog.set_transient_for(self.parent)\n\n response = dialog.run()\n if response == Gtk.ResponseType.YES:\n Keyring.get_default().remove_password()\n self.reset_widgets()\n self.set_current_password_visibility(False)\n self.emit(\"password-deleted\")\n dialog.destroy()\n",
"step-ids": [
10,
11,
13,
19,
21
]
}
|
[
10,
11,
13,
19,
21
] |
<|reserved_special_token_0|>
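# Locate the Vivado HLS project generated by hls4ml and print the reports for every solution.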
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'
)
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.
format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
<|reserved_special_token_0|>
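# Read vivado_hls.app (XML namespaces stripped) to list the project's solution directories.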
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name
):
solutions.append(sln_name)
return solutions
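# Print the C simulation log and the C synthesis report for one solution, when available.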
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
<|reserved_special_token_0|>
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
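            # '* DSP48' begins the detailed utilization breakdown; stop there unless a full report was requested.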
if not full_report and '* DSP48' in line:
break
print(line, end='')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'
)
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.
format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
<|reserved_special_token_0|>
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name
):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end='')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'
)
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.
format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name
):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end='')
<|reserved_special_token_1|>
from __future__ import print_function
import os
import re
import xml.etree.ElementTree as ET
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'
)
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.
format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name
):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end='')
<|reserved_special_token_1|>
from __future__ import print_function
import os
import re
import xml.etree.ElementTree as ET
def read_vivado_report(hls_dir, full_report=False):
if not os.path.exists(hls_dir):
print('Path {} does not exist. Exiting.'.format(hls_dir))
return
prj_dir = None
top_func_name = None
if os.path.isfile(hls_dir + '/build_prj.tcl'):
prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')
if prj_dir is None or top_func_name is None:
print('Unable to read project data. Exiting.')
return
sln_dir = hls_dir + '/' + prj_dir
if not os.path.exists(sln_dir):
print('Project {} does not exist. Rerun "hls4ml build -p {}".'.format(prj_dir, hls_dir))
return
solutions = _find_solutions(sln_dir)
print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))
for sln in solutions:
print('Reports for solution "{}":\n'.format(sln))
_find_reports(sln_dir + '/' + sln, top_func_name, full_report)
def _parse_build_script(script_path):
prj_dir = None
top_func_name = None
with open(script_path, 'r') as f:
for line in f.readlines():
if 'open_project' in line:
prj_dir = line.split()[-1]
elif 'set_top' in line:
top_func_name = line.split()[-1]
return prj_dir, top_func_name
def _find_solutions(sln_dir):
solutions = []
if os.path.isfile(sln_dir + '/vivado_hls.app'):
with open(sln_dir + '/vivado_hls.app') as f:
# Get rid of namespaces (workaround to support two types of vivado_hls.app files)
xmlstring = re.sub(' xmlns="[^"]+"', '', f.read(), count=1)
root = ET.fromstring(xmlstring)
for sln_tag in root.findall('solutions/solution'):
sln_name = sln_tag.get('name')
if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):
solutions.append(sln_name)
return solutions
def _find_reports(sln_dir, top_func_name, full_report=False):
csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)
if os.path.isfile(csim_file):
_show_csim_report(csim_file)
else:
print('C simulation report not found.')
syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)
if os.path.isfile(syn_file):
_show_synth_report(syn_file, full_report)
else:
print('Synthesis report not found.')
def _show_csim_report(csim_file):
with open(csim_file, 'r') as f:
print('C SIMULATION RESULT:')
print(f.read())
def _show_synth_report(synth_file, full_report=False):
with open(synth_file, 'r') as f:
print('SYNTHESIS REPORT:')
for line in f.readlines()[2:]:
if not full_report and '* DSP48' in line:
break
print(line, end = '')
|
flexible
|
{
"blob_id": "7d173b0571c20dc8fcae884451e8f69ba3a05763",
"index": 8087,
"step-1": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\n<mask token>\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-2": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\n<mask token>\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-3": "<mask token>\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-4": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n prj_dir = None\n top_func_name = None\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl'\n )\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.\n format(prj_dir, hls_dir))\n return\n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n return prj_dir, top_func_name\n\n\ndef _find_solutions(sln_dir):\n solutions = []\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name\n ):\n solutions.append(sln_name)\n return solutions\n\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end='')\n",
"step-5": "from __future__ import print_function\nimport os\nimport re\nimport xml.etree.ElementTree as ET\n\ndef read_vivado_report(hls_dir, full_report=False):\n if not os.path.exists(hls_dir):\n print('Path {} does not exist. Exiting.'.format(hls_dir))\n return\n\n prj_dir = None\n top_func_name = None\n\n if os.path.isfile(hls_dir + '/build_prj.tcl'):\n prj_dir, top_func_name = _parse_build_script(hls_dir + '/build_prj.tcl')\n\n if prj_dir is None or top_func_name is None:\n print('Unable to read project data. Exiting.')\n return\n \n sln_dir = hls_dir + '/' + prj_dir\n if not os.path.exists(sln_dir):\n print('Project {} does not exist. Rerun \"hls4ml build -p {}\".'.format(prj_dir, hls_dir))\n return\n \n solutions = _find_solutions(sln_dir)\n print('Found {} solution(s) in {}.'.format(len(solutions), sln_dir))\n\n for sln in solutions:\n print('Reports for solution \"{}\":\\n'.format(sln))\n _find_reports(sln_dir + '/' + sln, top_func_name, full_report)\n\ndef _parse_build_script(script_path):\n prj_dir = None\n top_func_name = None\n\n with open(script_path, 'r') as f:\n for line in f.readlines():\n if 'open_project' in line:\n prj_dir = line.split()[-1]\n elif 'set_top' in line:\n top_func_name = line.split()[-1]\n \n return prj_dir, top_func_name\n\ndef _find_solutions(sln_dir):\n solutions = []\n\n if os.path.isfile(sln_dir + '/vivado_hls.app'):\n with open(sln_dir + '/vivado_hls.app') as f:\n # Get rid of namespaces (workaround to support two types of vivado_hls.app files)\n xmlstring = re.sub(' xmlns=\"[^\"]+\"', '', f.read(), count=1)\n\n root = ET.fromstring(xmlstring)\n for sln_tag in root.findall('solutions/solution'):\n sln_name = sln_tag.get('name')\n if sln_name is not None and os.path.isdir(sln_dir + '/' + sln_name):\n solutions.append(sln_name)\n \n return solutions\n\ndef _find_reports(sln_dir, top_func_name, full_report=False):\n csim_file = sln_dir + '/csim/report/{}_csim.log'.format(top_func_name)\n if os.path.isfile(csim_file):\n _show_csim_report(csim_file)\n else:\n print('C simulation report not found.')\n \n syn_file = sln_dir + '/syn/report/{}_csynth.rpt'.format(top_func_name)\n if os.path.isfile(syn_file):\n _show_synth_report(syn_file, full_report)\n else:\n print('Synthesis report not found.')\n\ndef _show_csim_report(csim_file):\n with open(csim_file, 'r') as f:\n print('C SIMULATION RESULT:')\n print(f.read())\n\ndef _show_synth_report(synth_file, full_report=False):\n with open(synth_file, 'r') as f:\n print('SYNTHESIS REPORT:')\n for line in f.readlines()[2:]:\n if not full_report and '* DSP48' in line:\n break\n print(line, end = '')\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
def restart():
root.destroy()
os.startfile('data\\programs\\game with tkinter.py')
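# Lock all nine answer buttons (and Start) after a guess, then enable the restart button.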
def disableButton():
global l, restartButton, start
b1.config(state='disabled')
b2.config(state='disabled')
b3.config(state='disabled')
b4.config(state='disabled')
b5.config(state='disabled')
b6.config(state='disabled')
b7.config(state='disabled')
b8.config(state='disabled')
b9.config(state='disabled')
start.config(state='disabled')
restartButton.config(state='normal', command=restart, text=
' --->press to restart<--- ')
<|reserved_special_token_0|>
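# funForB3..funForB9: check whether the clicked button held the answer and show the win or lose image.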
def funForB3():
global notPresentList, element, l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList, element, l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList, element, l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList, element, l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList, element, l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList, element, l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList, element, l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open('data\\database\\present.txt', 'r') as file:
content = file.read().split('\n')
presentList = [content[random.randint(0, 400)], content[random.
randint(0, 400)], content[random.randint(0, 400)], content[
random.randint(0, 400)], content[random.randint(0, 400)],
content[random.randint(0, 400)], content[random.randint(0, 400)
], content[random.randint(0, 400)], content[random.randint(0, 400)]
]
element = presentList[random.randint(0, 8)]
return presentList, element
def notPresent():
global buttonList, start
with open('data\\database\\notpresent.txt', 'r') as file:
content = file.read().split('\n')
notPresentList = [content[random.randint(0, 35)], content[random.
randint(0, 35)], content[random.randint(0, 35)], content[random
.randint(0, 35)], content[random.randint(0, 35)], content[
random.randint(0, 35)], content[random.randint(0, 35)], content
[random.randint(0, 35)]]
start.config(state='normal')
obj = present()
presentList, element = obj[0], obj[1]
for i in range(9):
buttonList[i].config(text=presentList[i], state='disabled')
notPresentList.insert(random.randint(0, 9), element)
return notPresentList, element
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def restart():
root.destroy()
os.startfile('data\\programs\\game with tkinter.py')
def disableButton():
global l, restartButton, start
b1.config(state='disabled')
b2.config(state='disabled')
b3.config(state='disabled')
b4.config(state='disabled')
b5.config(state='disabled')
b6.config(state='disabled')
b7.config(state='disabled')
b8.config(state='disabled')
b9.config(state='disabled')
start.config(state='disabled')
restartButton.config(state='normal', command=restart, text=
' --->press to restart<--- ')
<|reserved_special_token_0|>
def funForB2():
global notPresentList, element, l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList, element, l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList, element, l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList, element, l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList, element, l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList, element, l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList, element, l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList, element, l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open('data\\database\\present.txt', 'r') as file:
content = file.read().split('\n')
presentList = [content[random.randint(0, 400)], content[random.
randint(0, 400)], content[random.randint(0, 400)], content[
random.randint(0, 400)], content[random.randint(0, 400)],
content[random.randint(0, 400)], content[random.randint(0, 400)
], content[random.randint(0, 400)], content[random.randint(0, 400)]
]
element = presentList[random.randint(0, 8)]
return presentList, element
def notPresent():
global buttonList, start
with open('data\\database\\notpresent.txt', 'r') as file:
content = file.read().split('\n')
notPresentList = [content[random.randint(0, 35)], content[random.
randint(0, 35)], content[random.randint(0, 35)], content[random
.randint(0, 35)], content[random.randint(0, 35)], content[
random.randint(0, 35)], content[random.randint(0, 35)], content
[random.randint(0, 35)]]
start.config(state='normal')
obj = present()
presentList, element = obj[0], obj[1]
for i in range(9):
buttonList[i].config(text=presentList[i], state='disabled')
notPresentList.insert(random.randint(0, 9), element)
return notPresentList, element
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def restart():
root.destroy()
os.startfile('data\\programs\\game with tkinter.py')
def disableButton():
global l, restartButton, start
b1.config(state='disabled')
b2.config(state='disabled')
b3.config(state='disabled')
b4.config(state='disabled')
b5.config(state='disabled')
b6.config(state='disabled')
b7.config(state='disabled')
b8.config(state='disabled')
b9.config(state='disabled')
start.config(state='disabled')
restartButton.config(state='normal', command=restart, text=
' --->press to restart<--- ')
def funForB1():
global notPresentList, element, l, start
ans = notPresentList[0] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB2():
global notPresentList, element, l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList, element, l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList, element, l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList, element, l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList, element, l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList, element, l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList, element, l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList, element, l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open('data\\database\\present.txt', 'r') as file:
content = file.read().split('\n')
presentList = [content[random.randint(0, 400)], content[random.
randint(0, 400)], content[random.randint(0, 400)], content[
random.randint(0, 400)], content[random.randint(0, 400)],
content[random.randint(0, 400)], content[random.randint(0, 400)
], content[random.randint(0, 400)], content[random.randint(0, 400)]
]
element = presentList[random.randint(0, 8)]
return presentList, element
def notPresent():
global buttonList, start
with open('data\\database\\notpresent.txt', 'r') as file:
content = file.read().split('\n')
notPresentList = [content[random.randint(0, 35)], content[random.
randint(0, 35)], content[random.randint(0, 35)], content[random
.randint(0, 35)], content[random.randint(0, 35)], content[
random.randint(0, 35)], content[random.randint(0, 35)], content
[random.randint(0, 35)]]
start.config(state='normal')
obj = present()
presentList, element = obj[0], obj[1]
for i in range(9):
buttonList[i].config(text=presentList[i], state='disabled')
notPresentList.insert(random.randint(0, 9), element)
return notPresentList, element
def start():
global buttonList, start, notPresentList, element
start.config(state='disabled')
for i in range(9):
buttonList[i].config(text=notPresentList[i], state='normal')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def restart():
root.destroy()
os.startfile('data\\programs\\game with tkinter.py')
def disableButton():
global l, restartButton, start
b1.config(state='disabled')
b2.config(state='disabled')
b3.config(state='disabled')
b4.config(state='disabled')
b5.config(state='disabled')
b6.config(state='disabled')
b7.config(state='disabled')
b8.config(state='disabled')
b9.config(state='disabled')
start.config(state='disabled')
restartButton.config(state='normal', command=restart, text=
' --->press to restart<--- ')
def funForB1():
global notPresentList, element, l, start
ans = notPresentList[0] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB2():
global notPresentList, element, l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList, element, l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList, element, l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList, element, l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList, element, l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList, element, l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList, element, l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList, element, l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open('data\\database\\present.txt', 'r') as file:
content = file.read().split('\n')
presentList = [content[random.randint(0, 400)], content[random.
randint(0, 400)], content[random.randint(0, 400)], content[
random.randint(0, 400)], content[random.randint(0, 400)],
content[random.randint(0, 400)], content[random.randint(0, 400)
], content[random.randint(0, 400)], content[random.randint(0, 400)]
]
element = presentList[random.randint(0, 8)]
return presentList, element
def notPresent():
global buttonList, start
with open('data\\database\\notpresent.txt', 'r') as file:
content = file.read().split('\n')
notPresentList = [content[random.randint(0, 35)], content[random.
randint(0, 35)], content[random.randint(0, 35)], content[random
.randint(0, 35)], content[random.randint(0, 35)], content[
random.randint(0, 35)], content[random.randint(0, 35)], content
[random.randint(0, 35)]]
start.config(state='normal')
obj = present()
presentList, element = obj[0], obj[1]
for i in range(9):
buttonList[i].config(text=presentList[i], state='disabled')
notPresentList.insert(random.randint(0, 9), element)
return notPresentList, element
def start():
global buttonList, start, notPresentList, element
start.config(state='disabled')
for i in range(9):
buttonList[i].config(text=notPresentList[i], state='normal')
<|reserved_special_token_0|>
root.title('Memory Game')
root.geometry('400x500')
root.resizable(0, 0)
root.config(bg='white')
<|reserved_special_token_0|>
start.place(x=150, y=110)
<|reserved_special_token_0|>
frameMain.place(x=10, y=150)
<|reserved_special_token_0|>
l.place(x=180, y=5)
<|reserved_special_token_0|>
b1.place(x=10, y=16)
b2.place(x=150, y=16)
b3.place(x=290, y=16)
b4.place(x=10, y=110)
b5.place(x=150, y=110)
b6.place(x=290, y=110)
b7.place(x=10, y=204)
b8.place(x=150, y=204)
b9.place(x=290, y=204)
<|reserved_special_token_0|>
restartButton.place(x=60, y=460)
<|reserved_special_token_0|>
root.mainloop()
<|reserved_special_token_1|>
import time,random,os
from tkinter import *
def restart():
root.destroy()
os.startfile(r"data\programs\game with tkinter.py")
def disableButton():
global l,restartButton,start
b1.config(state="disabled")
b2.config(state="disabled")
b3.config(state="disabled")
b4.config(state="disabled")
b5.config(state="disabled")
b6.config(state="disabled")
b7.config(state="disabled")
b8.config(state="disabled")
b9.config(state="disabled")
start.config(state="disabled")
restartButton.config(state="normal",command=restart,text=" --->press to restart<--- ")
def funForB1():
global notPresentList,element,l,start
ans = notPresentList[0] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB2():
global notPresentList,element,l
ans = notPresentList[1] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB3():
global notPresentList,element,l
ans = notPresentList[2] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB4():
global notPresentList,element,l
ans = notPresentList[3] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB5():
global notPresentList,element,l
ans = notPresentList[4] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB6():
global notPresentList,element,l
ans = notPresentList[5] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB7():
global notPresentList,element,l
ans = notPresentList[6] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB8():
global notPresentList,element,l
ans = notPresentList[7] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def funForB9():
global notPresentList,element,l
ans = notPresentList[8] == element
if ans:
l.config(image=image1)
else:
l.config(image=image2)
disableButton()
def present():
with open(r"data\database\present.txt", "r") as file:
content = file.read().split("\n")
presentList = [
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)],
content[random.randint(0,400)]
]
element = presentList[random.randint(0,8)]
return (presentList,element)
def notPresent():
global buttonList,start
with open(r"data\database\notpresent.txt","r") as file:
content = file.read().split("\n")
notPresentList = [
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
content[random.randint(0,35)],
]
start.config(state="normal")
obj = present()
presentList,element = obj[0],obj[1]
for i in range(9):
buttonList[i].config(text = presentList[i], state="disabled")
notPresentList.insert(random.randint(0,9),element)
return (notPresentList,element)
def start():
global buttonList,start,notPresentList,element
start.config(state="disabled")
for i in range(9):
buttonList[i].config(text = notPresentList[i], state="normal")
# main
root =Tk()
root.title("Memory Game")
root.geometry("400x500")
root.resizable(0,0)
root.config(bg="white")
image1 = PhotoImage(file=r"data\img\smiley.png")
image2 = PhotoImage(file=r"data\img\pleading.png")
start = Button(root, bg="black", fg="white", text="-->Start<--", font="comicsansms 15 bold", command=start, relief="raised",state="normal", bd=2)
start.place(x=150,y=110)
frameMain = Frame(root, relief="flat", bd=1, background="white", width=400, height=417)
frameMain.place(x=10, y=150)
image=PhotoImage(file=r"data\img\emoji.png")
l=Label(root,image=image ,font="comicsansms 15 bold", fg="black", bg="white")
l.place(x=180,y=5)
b1=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB1)
b2=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB2)
b3=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB3)
b4=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB4)
b5=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB5)
b6=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB6)
b7=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB7)
b8=Button(frameMain, bg='teal', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB8)
b9=Button(frameMain, bg='cyan', text="plz start", fg="white", width=10, height=5, relief='raised',bd=3, state="normal",disabledforeground="white",command = funForB9)
b1.place(x=10,y=16)
b2.place(x=150,y=16)
b3.place(x=290,y=16)
b4.place(x=10,y=110)
b5.place(x=150,y=110)
b6.place(x=290,y=110)
b7.place(x=10,y=204)
b8.place(x=150,y=204)
b9.place(x=290,y=204)
buttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]
restartButton = Button(root, bg="teal", fg="white", text="!!! Remember these items !!!", font="comicsansms 15 bold", relief="raised",state="disabled",disabledforeground="white")
restartButton.place(x=60,y=460)
obj = notPresent()
notPresentList,element = obj[0],obj[1]
root.mainloop()
|
flexible
|
{
"blob_id": "e70c5c9a62faa4c501c0f103ce0a0a419aaf4301",
"index": 2096,
"step-1": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\n<mask token>\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef restart():\n root.destroy()\n os.startfile('data\\\\programs\\\\game with tkinter.py')\n\n\ndef disableButton():\n global l, restartButton, start\n b1.config(state='disabled')\n b2.config(state='disabled')\n b3.config(state='disabled')\n b4.config(state='disabled')\n b5.config(state='disabled')\n b6.config(state='disabled')\n b7.config(state='disabled')\n b8.config(state='disabled')\n b9.config(state='disabled')\n start.config(state='disabled')\n restartButton.config(state='normal', command=restart, text=\n ' --->press to restart<--- ')\n\n\ndef funForB1():\n global notPresentList, element, l, start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB2():\n global notPresentList, element, l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB3():\n global notPresentList, element, l\n ans = notPresentList[2] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB4():\n global notPresentList, element, l\n ans = notPresentList[3] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB5():\n global notPresentList, element, l\n ans = notPresentList[4] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB6():\n global notPresentList, element, l\n ans = notPresentList[5] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB7():\n global notPresentList, element, l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB8():\n global notPresentList, element, l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef funForB9():\n global notPresentList, element, l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open('data\\\\database\\\\present.txt', 'r') as file:\n content = file.read().split('\\n')\n presentList = [content[random.randint(0, 400)], content[random.\n randint(0, 400)], content[random.randint(0, 400)], content[\n random.randint(0, 400)], content[random.randint(0, 400)],\n content[random.randint(0, 400)], content[random.randint(0, 400)\n ], content[random.randint(0, 400)], content[random.randint(0, 400)]\n ]\n element = presentList[random.randint(0, 8)]\n return presentList, element\n\n\ndef notPresent():\n global buttonList, start\n with open('data\\\\database\\\\notpresent.txt', 'r') as file:\n content = file.read().split('\\n')\n notPresentList = [content[random.randint(0, 35)], content[random.\n randint(0, 35)], content[random.randint(0, 35)], content[random\n .randint(0, 35)], content[random.randint(0, 35)], content[\n random.randint(0, 35)], content[random.randint(0, 35)], content\n [random.randint(0, 35)]]\n start.config(state='normal')\n obj = present()\n presentList, element = obj[0], obj[1]\n for i in range(9):\n buttonList[i].config(text=presentList[i], state='disabled')\n notPresentList.insert(random.randint(0, 9), element)\n return notPresentList, element\n\n\ndef start():\n global buttonList, start, notPresentList, element\n start.config(state='disabled')\n for i in range(9):\n 
buttonList[i].config(text=notPresentList[i], state='normal')\n\n\n<mask token>\nroot.title('Memory Game')\nroot.geometry('400x500')\nroot.resizable(0, 0)\nroot.config(bg='white')\n<mask token>\nstart.place(x=150, y=110)\n<mask token>\nframeMain.place(x=10, y=150)\n<mask token>\nl.place(x=180, y=5)\n<mask token>\nb1.place(x=10, y=16)\nb2.place(x=150, y=16)\nb3.place(x=290, y=16)\nb4.place(x=10, y=110)\nb5.place(x=150, y=110)\nb6.place(x=290, y=110)\nb7.place(x=10, y=204)\nb8.place(x=150, y=204)\nb9.place(x=290, y=204)\n<mask token>\nrestartButton.place(x=60, y=460)\n<mask token>\nroot.mainloop()\n",
"step-5": "import time,random,os\nfrom tkinter import *\n\ndef restart():\n root.destroy()\n os.startfile(r\"data\\programs\\game with tkinter.py\")\n \ndef disableButton():\n global l,restartButton,start\n b1.config(state=\"disabled\")\n b2.config(state=\"disabled\")\n b3.config(state=\"disabled\")\n b4.config(state=\"disabled\")\n b5.config(state=\"disabled\")\n b6.config(state=\"disabled\")\n b7.config(state=\"disabled\")\n b8.config(state=\"disabled\")\n b9.config(state=\"disabled\")\n start.config(state=\"disabled\")\n restartButton.config(state=\"normal\",command=restart,text=\" --->press to restart<--- \")\n \ndef funForB1():\n global notPresentList,element,l,start\n ans = notPresentList[0] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB2():\n global notPresentList,element,l\n ans = notPresentList[1] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB3():\n global notPresentList,element,l\n ans = notPresentList[2] == element\n if ans:\n \n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB4():\n global notPresentList,element,l\n ans = notPresentList[3] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB5():\n global notPresentList,element,l\n ans = notPresentList[4] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB6():\n global notPresentList,element,l\n ans = notPresentList[5] == element\n if ans:\n\n l.config(image=image1)\n else:\n\n l.config(image=image2)\n disableButton()\n\ndef funForB7():\n global notPresentList,element,l\n ans = notPresentList[6] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB8():\n global notPresentList,element,l\n ans = notPresentList[7] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\ndef funForB9():\n global notPresentList,element,l\n ans = notPresentList[8] == element\n if ans:\n l.config(image=image1)\n else:\n l.config(image=image2)\n disableButton()\n\n\ndef present():\n with open(r\"data\\database\\present.txt\", \"r\") as file:\n content = file.read().split(\"\\n\")\n presentList = [\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)],\n content[random.randint(0,400)]\n ]\n \n element = presentList[random.randint(0,8)]\n return (presentList,element)\n\ndef notPresent():\n global buttonList,start\n with open(r\"data\\database\\notpresent.txt\",\"r\") as file:\n content = file.read().split(\"\\n\")\n notPresentList = [\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n content[random.randint(0,35)],\n ]\n start.config(state=\"normal\")\n obj = present()\n presentList,element = obj[0],obj[1]\n for i in range(9):\n buttonList[i].config(text = presentList[i], state=\"disabled\")\n notPresentList.insert(random.randint(0,9),element)\n\n return (notPresentList,element)\n\ndef start():\n global buttonList,start,notPresentList,element\n 
start.config(state=\"disabled\")\n\n for i in range(9):\n buttonList[i].config(text = notPresentList[i], state=\"normal\")\n\n \n \n\n \n# main\n\nroot =Tk()\nroot.title(\"Memory Game\")\nroot.geometry(\"400x500\")\nroot.resizable(0,0)\nroot.config(bg=\"white\")\n\nimage1 = PhotoImage(file=r\"data\\img\\smiley.png\")\nimage2 = PhotoImage(file=r\"data\\img\\pleading.png\")\n\n\nstart = Button(root, bg=\"black\", fg=\"white\", text=\"-->Start<--\", font=\"comicsansms 15 bold\", command=start, relief=\"raised\",state=\"normal\", bd=2)\nstart.place(x=150,y=110)\n\n\n\nframeMain = Frame(root, relief=\"flat\", bd=1, background=\"white\", width=400, height=417)\nframeMain.place(x=10, y=150)\n\n\nimage=PhotoImage(file=r\"data\\img\\emoji.png\")\nl=Label(root,image=image ,font=\"comicsansms 15 bold\", fg=\"black\", bg=\"white\")\nl.place(x=180,y=5)\n\nb1=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB1)\nb2=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB2)\nb3=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB3)\nb4=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB4)\nb5=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB5)\nb6=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB6)\nb7=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB7)\nb8=Button(frameMain, bg='teal', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB8)\nb9=Button(frameMain, bg='cyan', text=\"plz start\", fg=\"white\", width=10, height=5, relief='raised',bd=3, state=\"normal\",disabledforeground=\"white\",command = funForB9)\n\n\nb1.place(x=10,y=16)\nb2.place(x=150,y=16)\nb3.place(x=290,y=16)\nb4.place(x=10,y=110)\nb5.place(x=150,y=110)\nb6.place(x=290,y=110)\nb7.place(x=10,y=204)\nb8.place(x=150,y=204)\nb9.place(x=290,y=204)\n\nbuttonList = [b1,b2,b3,b4,b5,b6,b7,b8,b9]\n\n\nrestartButton = Button(root, bg=\"teal\", fg=\"white\", text=\"!!! Remember these items !!!\", font=\"comicsansms 15 bold\", relief=\"raised\",state=\"disabled\",disabledforeground=\"white\")\nrestartButton.place(x=60,y=460)\nobj = notPresent()\nnotPresentList,element = obj[0],obj[1]\n\nroot.mainloop()\n",
"step-ids": [
11,
12,
14,
15,
18
]
}
|
[
11,
12,
14,
15,
18
] |
<|reserved_special_token_0|>
class _RestrictData:
__slots__ = ()
<|reserved_special_token_0|>
class RestrictBlend:
__slots__ = 'context', 'data'
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
_bpy.data = _data_restrict
_bpy.context = _context_restrict
def __exit__(self, type, value, traceback):
_bpy.data = self.data
_bpy.context = self.context
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _RestrictContext:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def window_manager(self):
return self._real_data.window_managers[0]
@property
def preferences(self):
return self._real_pref
class _RestrictData:
__slots__ = ()
<|reserved_special_token_0|>
class RestrictBlend:
__slots__ = 'context', 'data'
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
_bpy.data = _data_restrict
_bpy.context = _context_restrict
def __exit__(self, type, value, traceback):
_bpy.data = self.data
_bpy.context = self.context
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class _RestrictContext:
__slots__ = ()
_real_data = _bpy.data
_real_pref = _bpy.context.preferences
@property
def window_manager(self):
return self._real_data.window_managers[0]
@property
def preferences(self):
return self._real_pref
class _RestrictData:
__slots__ = ()
<|reserved_special_token_0|>
class RestrictBlend:
__slots__ = 'context', 'data'
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
_bpy.data = _data_restrict
_bpy.context = _context_restrict
def __exit__(self, type, value, traceback):
_bpy.data = self.data
_bpy.context = self.context
<|reserved_special_token_1|>
<|reserved_special_token_0|>
__all__ = 'RestrictBlend',
<|reserved_special_token_0|>
class _RestrictContext:
__slots__ = ()
_real_data = _bpy.data
_real_pref = _bpy.context.preferences
@property
def window_manager(self):
return self._real_data.window_managers[0]
@property
def preferences(self):
return self._real_pref
class _RestrictData:
__slots__ = ()
_context_restrict = _RestrictContext()
_data_restrict = _RestrictData()
class RestrictBlend:
__slots__ = 'context', 'data'
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
_bpy.data = _data_restrict
_bpy.context = _context_restrict
def __exit__(self, type, value, traceback):
_bpy.data = self.data
_bpy.context = self.context
<|reserved_special_token_1|>
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8-80 compliant>
"""
This module contains RestrictBlend context manager.
"""
__all__ = (
"RestrictBlend",
)
import bpy as _bpy
class _RestrictContext:
__slots__ = ()
_real_data = _bpy.data
# safe, the pointer never changes
_real_pref = _bpy.context.preferences
@property
def window_manager(self):
return self._real_data.window_managers[0]
@property
def preferences(self):
return self._real_pref
class _RestrictData:
__slots__ = ()
_context_restrict = _RestrictContext()
_data_restrict = _RestrictData()
class RestrictBlend:
__slots__ = ("context", "data")
def __enter__(self):
self.data = _bpy.data
self.context = _bpy.context
_bpy.data = _data_restrict
_bpy.context = _context_restrict
def __exit__(self, type, value, traceback):
_bpy.data = self.data
_bpy.context = self.context
|
flexible
|
{
"blob_id": "aa4226c377368d1ece4e556db9b7fdd0134472c9",
"index": 5450,
"step-1": "<mask token>\n\n\nclass _RestrictData:\n __slots__ = ()\n\n\n<mask token>\n\n\nclass RestrictBlend:\n __slots__ = 'context', 'data'\n\n def __enter__(self):\n self.data = _bpy.data\n self.context = _bpy.context\n _bpy.data = _data_restrict\n _bpy.context = _context_restrict\n\n def __exit__(self, type, value, traceback):\n _bpy.data = self.data\n _bpy.context = self.context\n",
"step-2": "<mask token>\n\n\nclass _RestrictContext:\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def window_manager(self):\n return self._real_data.window_managers[0]\n\n @property\n def preferences(self):\n return self._real_pref\n\n\nclass _RestrictData:\n __slots__ = ()\n\n\n<mask token>\n\n\nclass RestrictBlend:\n __slots__ = 'context', 'data'\n\n def __enter__(self):\n self.data = _bpy.data\n self.context = _bpy.context\n _bpy.data = _data_restrict\n _bpy.context = _context_restrict\n\n def __exit__(self, type, value, traceback):\n _bpy.data = self.data\n _bpy.context = self.context\n",
"step-3": "<mask token>\n\n\nclass _RestrictContext:\n __slots__ = ()\n _real_data = _bpy.data\n _real_pref = _bpy.context.preferences\n\n @property\n def window_manager(self):\n return self._real_data.window_managers[0]\n\n @property\n def preferences(self):\n return self._real_pref\n\n\nclass _RestrictData:\n __slots__ = ()\n\n\n<mask token>\n\n\nclass RestrictBlend:\n __slots__ = 'context', 'data'\n\n def __enter__(self):\n self.data = _bpy.data\n self.context = _bpy.context\n _bpy.data = _data_restrict\n _bpy.context = _context_restrict\n\n def __exit__(self, type, value, traceback):\n _bpy.data = self.data\n _bpy.context = self.context\n",
"step-4": "<mask token>\n__all__ = 'RestrictBlend',\n<mask token>\n\n\nclass _RestrictContext:\n __slots__ = ()\n _real_data = _bpy.data\n _real_pref = _bpy.context.preferences\n\n @property\n def window_manager(self):\n return self._real_data.window_managers[0]\n\n @property\n def preferences(self):\n return self._real_pref\n\n\nclass _RestrictData:\n __slots__ = ()\n\n\n_context_restrict = _RestrictContext()\n_data_restrict = _RestrictData()\n\n\nclass RestrictBlend:\n __slots__ = 'context', 'data'\n\n def __enter__(self):\n self.data = _bpy.data\n self.context = _bpy.context\n _bpy.data = _data_restrict\n _bpy.context = _context_restrict\n\n def __exit__(self, type, value, traceback):\n _bpy.data = self.data\n _bpy.context = self.context\n",
"step-5": "# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# <pep8-80 compliant>\n\n\"\"\"\nThis module contains RestrictBlend context manager.\n\"\"\"\n\n__all__ = (\n \"RestrictBlend\",\n)\n\nimport bpy as _bpy\n\n\nclass _RestrictContext:\n __slots__ = ()\n _real_data = _bpy.data\n # safe, the pointer never changes\n _real_pref = _bpy.context.preferences\n\n @property\n def window_manager(self):\n return self._real_data.window_managers[0]\n\n @property\n def preferences(self):\n return self._real_pref\n\n\nclass _RestrictData:\n __slots__ = ()\n\n\n_context_restrict = _RestrictContext()\n_data_restrict = _RestrictData()\n\n\nclass RestrictBlend:\n __slots__ = (\"context\", \"data\")\n\n def __enter__(self):\n self.data = _bpy.data\n self.context = _bpy.context\n _bpy.data = _data_restrict\n _bpy.context = _context_restrict\n\n def __exit__(self, type, value, traceback):\n _bpy.data = self.data\n _bpy.context = self.context\n",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
# coding: utf-8
import logging
import uuid
import json
import xmltodict
import bottle
from bottle import HTTPError
from bottle.ext import sqlalchemy
from database import Base, engine
from database import JdWaybillSendResp, JdWaybillApplyResp
jd = bottle.Bottle(catchall=False)
plugin = sqlalchemy.Plugin(
engine, # SQLAlchemy engine created with create_engine function.
Base.metadata, # SQLAlchemy metadata, required only if create=True.
keyword='db', # Keyword used to inject session database in a route (default 'db').
create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).
commit=True, # If it is true, plugin commit changes after route is executed (default True).
use_kwargs=False
# If it is true and keyword is not defined,
# plugin uses **kwargs argument to inject session database (default False).
)
jd.install(plugin)
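# Note (added for clarity): with keyword='db' above, the bottle-sqlalchemy
# plugin injects a fresh SQLAlchemy session into every route that declares a
# 'db' argument (as the handlers below do), and commits it after the route
# returns because commit=True.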
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
else: # '''jingdong.etms.waybillcode.send'''
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()
if jd_rsp:
# return entities
return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({
'jingdong_etms_waybillcode_get_responce':
{'resultInfo':
{'message': u'成功',
'code': 100,
'deliveryIdList': [code]
},
'code': u'0'
}
})
def jd_send_response_normal(deliver_id, order_id):
return json.dumps({
"jingdong_etms_waybill_send_responce": {
"resultInfo": {
"message": u"成功",
"deliveryId": deliver_id,
"code": 100,
"orderId": order_id
}
}
})
|
normal
|
{
"blob_id": "a93884757069393b4d96de5ec9c7d815d58a2ea5",
"index": 935,
"step-1": "<mask token>\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-2": "<mask token>\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-3": "<mask token>\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-4": "import logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"step-5": "# coding: utf-8\nimport logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\n\njd = bottle.Bottle(catchall=False)\n\nplugin = sqlalchemy.Plugin(\n engine, # SQLAlchemy engine created with create_engine function.\n Base.metadata, # SQLAlchemy metadata, required only if create=True.\n keyword='db', # Keyword used to inject session database in a route (default 'db').\n create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).\n commit=True, # If it is true, plugin commit changes after route is executed (default True).\n use_kwargs=False\n # If it is true and keyword is not defined,\n # plugin uses **kwargs argument to inject session database (default False).\n)\n\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else: # '''jingdong.etms.waybillcode.send'''\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()\n if jd_rsp:\n # return entities\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({\n 'jingdong_etms_waybillcode_get_responce':\n {'resultInfo':\n {'message': u'成功',\n 'code': 100,\n 'deliveryIdList': [code]\n },\n 'code': u'0'\n }\n })\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({\n \"jingdong_etms_waybill_send_responce\": {\n \"resultInfo\": {\n \"message\": u\"成功\",\n \"deliveryId\": deliver_id,\n \"code\": 100,\n \"orderId\": order_id\n }\n }\n })\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from firstfuncs_1618 import *
figdir='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'
figdir_paper='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'
########################################################################################################
########################################################################################################
#### Set up the optimization framework, which allows for varying almost all elements within a prescribed range
########################################################################################################
########################################################################################################
WM=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_2008.nc')
WM_mb=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_mb_2008.nc')
cp=3850
rhow=1025
tera=10**12
# NorESM heat flux (taking sea ice into account)
Q=-251*tera/rhow/cp/1e6 #for the Sverdrups
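# unit bookkeeping: dividing -251 TW by rho*cp gives m^3/s * degC, and the
# final 1e6 converts that to Sv*degC, matching the temperature row of the
# budget matrices below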
def get_U_S_T_from_WM(WM):
U={}
S={}
T={}
for wm in WM.WM:
U[str(wm.values)]=float(WM['TRANS'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
S[str(wm.values)]=float(WM['PSAL'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
T[str(wm.values)]=float(WM['PTMP'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)
U['SI']=0.073 # NorESM fresh water input v. similar to Kwok et al. 2004 70mSv
U['FW']=0.028 # mean E-P from JRA55
U['Q']=Q
S['SI']=0
S['FW']=0
T['SI']=0
T['FW']=0
T['Q']=1
return U,S,T
U,S,T=get_U_S_T_from_WM(WM)
U_mb,S_mb,T_mb=get_U_S_T_from_WM(WM_mb)
def get_U_from_x(x):
U={}
U['PWS']=x[0]
U['AWS']=x[1]
U['DWS']=x[2]
U['PWN']=x[3]
U['AWN']=x[4]
U['FW']=x[5]
U['SI']=x[6]
U['Q']=x[7]
return U
AM={}
x0={}
AM['base']=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
x0['base']=[U['PWS'],U['AWS'],U['DWS'],U['PWN'],U['AWN'],U['FW'],U['SI'],U['Q']]
AM['massbal']=array([[1,1,1,0,0,0.5,0.5,0],\
[0,0,0,1,1,0.5,0.5,0],\
[S_mb['PWS'],S_mb['AWS'],S_mb['DWS'],S_mb['PWN'],S_mb['AWN'],S_mb['FW'],S_mb['SI'],0],\
[T_mb['PWS'],T_mb['AWS'],T_mb['DWS'],T_mb['PWN'],T_mb['AWN'],T_mb['FW'],T_mb['SI'],1]])
x0['massbal']=[U_mb['PWS'],U_mb['AWS'],U_mb['DWS'],U_mb['PWN'],U_mb['AWN'],U_mb['FW'],U_mb['SI'],U_mb['Q']]
zz='base'
AM[zz].dot(x0[zz]) # residuals of the prior estimate (mass, salt, heat rows)
16/35 # scratch arithmetic left from exploration
1.5/10
# normalization constants, kept handy for later calcs
Snorm=35
Tnorm=5
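# Snorm and Tnorm rescale the salt and heat constraint rows so all rows of
# the weighted system are O(1) in the inversions below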
def run_inverse_model(zz,U,S,T):
dv=-AM[zz].dot(x0[zz])
if zz=='base':
Winv=diag([1,1/Snorm,1/Tnorm])
elif zz=='massbal':
Winv=diag([1,1,1/Snorm,1/Tnorm])
Evec=array([xx/5 for xx in x0[zz]])
# Evec=hstack((5*[1],0.02,0.02,Qvar))
E=diag(Evec)
Umat,D,VmatT=linalg.svd(Winv.dot(AM[zz].dot(E)))
Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T
Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
xbase=x0[zz]+xsol_Ad
P=diag(E-E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T))+linalg.inv(Winv)).dot(AM[zz].dot(E)))))
Ubase=get_U_from_x(xbase)
Ue=get_U_from_x(P)
return Ubase,Ue,xbase
Ubase,Ue,xbase=run_inverse_model('base',U,S,T)
Umb_sol,Umb_err,xmb=run_inverse_model('massbal',U_mb,S_mb,T_mb)
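# optional sanity check (added annotation): the SVD solution of the
# underdetermined system satisfies the constraints exactly, so these
# residuals should be near machine precision (compare the prior residuals
# inspected above)
AM['base'].dot(xbase)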
coldic={'AWS':'red','DWS':'grey','PWS':'royalblue','PWN':'purple','AWN':'orange','SI':'cyan','FW':'cyan','Q':'limegreen'}
def plot_base_case_simple(Ubase,Ue,plabel):
f,axx=subplots(1,4,figsize=(9,2.5),constrained_layout=True,gridspec_kw=dict(width_ratios=[2,3,1,1]))
alf=0.75
capi=7
#U
axx[0].bar(range(2),[Ubase[kk] for kk in ['AWS','DWS']],color=[coldic[kk] for kk in ['AWS','DWS']],yerr=[Ue[kk] for kk in ['AWS','DWS']],capsize=capi,alpha=alf)
axx[0].plot(range(2),[U[kk] for kk in ['AWS','DWS']],'o',color='k')
ylimi=20
axx[0].set_ylim(-ylimi,ylimi)
ylimi=4
axx[1].set_ylim(-ylimi,ylimi)
axx[1].bar(range(3),[Ubase[kk] for kk in ['PWS','PWN','AWN']],color=[coldic[kk] for kk in ['PWS','PWN','AWN']],yerr=[Ue[kk] for kk in ['PWS','PWN','AWN']],capsize=capi,alpha=alf)
axx[1].plot(range(3),[U[kk] for kk in ['PWS','PWN','AWN']],'o',color='k')
    axx[2].bar(range(1),Ubase['SI']+Ubase['FW'],color=coldic['FW'],yerr=Ue['SI']+Ue['FW'],capsize=capi,alpha=alf) # was U['SI']+Ubase['FW'], likely a typo: bars show the solution
axx[2].plot(range(1),U['SI']+U['FW'],'o',color='k')
fwlim=0.2
axx[2].set_ylim(-fwlim,fwlim)
fsz=14
axx[0].set_ylabel('Volume transport [Sv]',fontsize=fsz)
axx[3].set_ylabel('Heat flux [TW]',fontsize=fsz)
axx[3].bar(0,cp*rhow*(Ubase['Q'])/1e6,color=coldic['Q'],yerr=cp*rhow*Ue['Q']/1e6,capsize=capi,alpha=alf)
axx[3].plot(0,cp*rhow*(U['Q'])/1e6,'o',color='k')
for ii in range(3):
axx[ii].axhline(0,color='k')
axx[0].set_xticks(range(2))
axx[0].set_xticklabels(['AWS','DWS'])
axx[1].set_xticks(range(3))
axx[1].set_xticklabels(['PWS','PWN','AWN'])
axx[2].set_xticks(range(1))
axx[2].set_xticklabels(['FW'])
axx[3].set_xticks([0])
axx[3].set_xticklabels('Q')
    savefig(figdir_paper+'_extra_2004/InvBudSol_'+plabel+'.png',bbox_inches='tight')
    savefig(figdir_paper+'_extra_2004/InvBudSol_'+plabel+'.pdf',bbox_inches='tight')
plot_base_case_simple(Ubase,Ue,'base')
U
Ubase['SI']+Ubase['FW']
Ubase['Q']*cp*rhow/1e6
basediff=[(kk,Ubase[kk]-U[kk]) for kk in Ubase]
basediff
plot_base_case_simple(Umb_sol,Umb_err,'mb')
[(kk,Umb_sol[kk]-U_mb[kk]) for kk in Ubase]
##################################################################################
# Calculate fraction of fresh water vs. other water masses that goes into each limb
#################################################################################
#epsilon: fraction of PWN routed into the DWS (overturning) limb
epsilon=arange(0,1.1,0.1)
def get_a_b_fracs(Ubase,S):
#fraction of FW in PWS, as a function of epsilon
a=((1-epsilon)*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['PWS']*(S['PWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])
#fraction of FW in DWS, as a function of epsilon
b=(epsilon*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['DWS']*(S['DWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])
return a,b
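# derivation sketch (annotation, not in the original): the estuarine limb
# obeys U_PWS + (1-eps)*U_PWN + U_AWS_in + a*(U_FW+U_SI) = 0 (volume), with
# U_AWS_in = -(U_PWS*S_PWS + (1-eps)*U_PWN*S_PWN)/S_AWS from salt balance
# (FW and SI carry no salt); eliminating U_AWS_in yields `a`, and `b` follows
# identically for the overturning (DWS) limb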
S['PWN']/S['AWS']
S['PWS']/S['AWS']
S['DWS']/S['AWS']
Ubase['PWS']
Ubase['DWS']
Ubase['PWN']*(S['PWN']/S['AWS']-1)
Ubase['PWS']*(S['PWS']/S['AWS']-1)
Ubase['DWS']*(S['DWS']/S['AWS']-1)
(Ubase['FW']+Ubase['SI'])
a={}
b={}
a['base'],b['base']=get_a_b_fracs(Ubase,S)
a['mb'],b['mb']=get_a_b_fracs(Umb_sol,S_mb)
[(kk,S[kk]-S_mb[kk]) for kk in S]
def plot_adep():
for ii,kk in enumerate(a):
plot(1-epsilon,a[kk],linewidth=3,label=kk,color='C'+str(ii))
    xlabel('$\\mathbf{1-\\epsilon}$\nfraction of PWN in PWS')
    ylabel('$\\mathbf{a}$\n fraction of (FW + SI) in PWS')
xlim(0,1)
axhline(0,color='k')
legend()
savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.png',bbox_inches='tight')
savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.pdf',bbox_inches='tight')
plot_adep()
#################################################################################
##### Look into how much sea ice properties matter
#################################################################################
sivar={}
for S_SI in range(0,10,2):
sivar[S_SI]={}
for T_SI in range(-90,5,10):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S_SI,0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T_SI,1]])
dv=-AM.dot(xbase)
Evec=array(hstack(([1]*5,xbase[-3:]/5)))
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
sivar[S_SI][T_SI]=xbase+xsol_Ad
def get_mats_from_dic(sivar):
Svec=array([float(ff) for ff in sivar])
Tvec=array([float(ff) for ff in sivar[Svec[0]]])
simats={}
for QQ,kk in enumerate(Ubase):
simats[kk]=zeros((len(Svec),len(Tvec)))
for ii,ss in enumerate(Svec):
for jj,tt in enumerate(Tvec):
simats[kk][ii,jj]=sivar[ss][tt][QQ]
return Svec,Tvec,simats
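# get_mats_from_dic reshapes the nested {S: {T: solution}} dicts into one
# (len(Svec), len(Tvec)) matrix per unknown, keyed like Ubase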
Svec,Tvec,simats=get_mats_from_dic(sivar)
def plot_SIresponse():
f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
axivec=array([])
for axirow in axx:
for axi in axirow:
axivec=hstack((axivec,axi))
for axi,kk in zip(axivec,simats):
if (kk=='FW') | (kk=='SI'):
climi=10
contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
axi.set_title(kk+' [mSv]')
cbar=colorbar(contit,ax=axi,format='%1.0f')
elif kk=='Q':
climi=30
contit=axi.contourf(Svec,Tvec,cp*rhow*(simats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
axi.set_title(kk+' [TW]')
cbar=colorbar(contit,ax=axi,format='%2.0f')
else:
climi=0.3
contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
axi.set_title(kk+' [Sv]')
cbar=colorbar(contit,ax=axi,format='%0.2f')
for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
label.set_visible(False)
f.text(0.5, 0, 'sea ice salinity', ha='center',fontsize=14)
    f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va='center',rotation='vertical',fontsize=14)
savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.png',bbox_inches='tight')
savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.pdf',bbox_inches='tight')
plot_SIresponse()
contourf(simats['AWN'].T-Ubase['AWN']+simats['PWN'].T-Ubase['PWN'])
colorbar()
#################################################################################
##### Test dependence on PW salinity (both north and south)
#################################################################################
pwsvar={}
for S_PWNa in arange(-1,0.05,0.1):
pwsvar[S_PWNa]={}
for S_PWSa in arange(-1.0,0.05,0.1):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS']+S_PWSa,S['AWS'],S['DWS'],S['PWN']+S_PWNa,S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
dv=-AM.dot(xbase)
Evec=array(hstack(([1]*5,xbase[-3:]/5)))
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
pwsvar[S_PWNa][S_PWSa]=xbase+xsol_Ad
PWN_Svec,PWS_Svec,pwmats=get_mats_from_dic(pwsvar)
####################################################################################################
######## Response is pretty uniform: try to tease out a pattern (and look at other deps?) #######
##################################################################################################
PWN_Smat,PWS_Smat=meshgrid(PWN_Svec,PWS_Svec)
U_si=get_U_from_x(sivar[0][-30]) # sea-ice case with S_SI=0, T_SI=-30
U_pw=get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001]) # both PWs 0.5 fresher; the long floats are the exact arange-generated keys
(U_pw['FW']+U_pw['SI']-(Ubase['FW']+Ubase['SI']))*1e3
U_pw['FW']+U_pw['SI']
Ubase['FW']+Ubase['SI']
U_si
[(kk,U_si[kk]-Ubase[kk]) for kk in Ubase]
[U_si[kk]-Ubase[kk] for kk in Ubase][-1]*cp*rhow/1e6
U_pw['Q']*cp*rhow/1e6
def lineplot_PW_salinity():
f,axx=subplots(1,3,figsize=(11,3),sharey=True)
xind=-1
yind=-1
svr=len(PWS_Svec)
xvar=[(S['PWN']+PWN_Smat)[xind,:],(S['PWS']+PWS_Smat)[:,yind],[(S['PWS']+PWS_Smat)[ii,ii] for ii in range(svr)]]
ufw_tot=-Ubase['SI']-Ubase['FW']
yvar_fw=[pwmats['FW'].T[xind,:]+pwmats['SI'].T[xind,:]+ufw_tot,pwmats['FW'].T[:,yind]+pwmats['SI'].T[:,yind]+ufw_tot,array([pwmats['FW'].T[ii,ii]+pwmats['SI'].T[ii,ii]+ufw_tot for ii in range(svr)])]
yvar_Q=[pwmats['Q'].T[xind,:]-Ubase['Q'],pwmats['Q'].T[:,yind]-Ubase['Q'],array([pwmats['Q'].T[ii,ii]-Ubase['Q'] for ii in range(svr)])]
xlab=['PWN salinity','PWS salinity','PWS salinity']
titvec=['a) Vary PWN salinity\n\nPWS = 34.4','b) Vary PWS salinity\n\nPWN = 33.7','c) Vary both PW salinities']
lw=2
for kk in ['AWS','PWS','DWS','AWN','PWN']:
axx[0].plot(xvar[0],(pwmats[kk].T[xind,:]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
axx[1].plot(xvar[1],(pwmats[kk].T[:,yind]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)
axx[2].plot(xvar[2],array([(pwmats[kk].T[ii,ii]-Ubase[kk])for ii in range(svr)]),color=coldic[kk],label=kk,linewidth=lw)
for ii in range(3):
ax1=axx[ii].twinx()
        ax1.plot(xvar[ii],(yvar_fw[ii])*1e3,color='c',linewidth=lw)
ax2=axx[ii].twinx()
ax2.plot(xvar[ii],cp*rhow*(yvar_Q[ii])/1e6,color='limegreen',linewidth=lw)
axx[ii].set_xlabel(xlab[ii])
ax1.set_ylim(-10,10)
ax2.set_ylim(-40,40)
axx[ii].set_title(titvec[ii],fontweight='bold')
if ii!=2:
ax1.set_yticklabels('')
ax2.set_yticklabels('')
axx[ii].set_xlim(xvar[ii][0],xvar[ii][-1])
axx[0].set_ylim(-1.5,1.5)
axx[0].set_yticks(arange(-1,1.1,0.5))
ax2.spines["right"].set_position(("axes", 1.3))
axx[0].set_ylabel('Transport anomaly [Sv]')
ax1.set_ylabel('Fresh water flux anomaly [mSv]',color='c')
ax2.set_ylabel('Heat flux anomaly [TW]',color='limegreen')
ax1.tick_params(axis='y', colors='c')
ax2.tick_params(axis='y', colors='limegreen')
leg=axx[0].legend(loc=(0.5,-0.5),ncol=5,fontsize=13)
for line in leg.get_lines():
line.set_linewidth(4.0)
axi2=axx[2].twiny()
axi2.set_xticks(arange(32.8,33.8,0.2))
axi2.set_xlim(xvar[0][0],xvar[0][-1])
axi2.set_xlabel('PWN salinity')
axx[2].axvline(34.4-0.5,color='k',zorder=0)
# axx[0].set_title('a) Vary PWN salinities\n\n',fontweight='bold')
# axx[1].set_title('b) Vary PWS salinities\n\n',fontweight='bold')
# axx[2].set_title('c) Vary both PW salinities',fontweight='bold')
savefig(figdir_paper+'/PWS_dep.png',bbox_inches='tight')
savefig(figdir_paper+'/PWS_dep.pdf',bbox_inches='tight')
lineplot_PW_salinity()
37/(56+37+5) # scratch: overturning-limb share of the FW budget
#######################################################################################
############## What happens if we add more FW? (Like 100mSv) ###########################
#######################################################################################
Ubase['FW']
Ubase['SI']
fwvar={}
for U_FW in arange(0,0.11,0.01):
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
xinit=xbase.copy()
xinit[5]=xinit[5]+U_FW
dv=-AM.dot(xinit)
Evec=xinit/5
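    # near-zero prior error on elements 5:7 (FW, SI) pins them at their
    # prescribed values, so only the remaining unknowns adjust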
Evec[5:7]=1e-10
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
fwvar[U_FW]=xinit+xsol_Ad
U_fwvar=get_U_from_x(fwvar[0.02])
a_fw,b_fw=get_a_b_fracs(U_fwvar,S)
U['FW']+U['SI']
Ubase['FW']+Ubase['SI']+0.05
U_fwvar['FW']+U_fwvar['SI']
U_fwvar['Q']*cp*rhow/1e6
U_fwvar
#######################################################################################
############## What happens if we add more FW and make both PWs fresher? ###########################
#######################################################################################
AM=array([[1,1,1,1,1,1,1,0],\
[S['PWS']-0.5,S['AWS'],S['DWS'],S['PWN']-0.5,S['AWN'],S['FW'],S['SI'],0],\
[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])
xinit=xbase.copy()
xinit[5]=xinit[5]+0.02
dv=-AM.dot(xinit)
Evec=xinit/5
Evec[5:7]=1e-10
E=diag(Evec)
Winv=diag([1,1/Snorm,1/Tnorm])
Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))
Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T
Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)
xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))
xsol_Ad=E.dot(xsol_prime)
x_both=xinit+xsol_Ad
U_both=get_U_from_x(x_both)
S_PW=S.copy()
S_PW['PWS']=S['PWS']-0.5
S_PW['PWN']=S['PWN']-0.5
a_both,b_both=get_a_b_fracs(U_both,S_PW)
#######################################################################################
############## Now look at consequences for the FW distribution ###########################
#######################################################################################
a_pwmat=zeros((len(epsilon),shape(pwmats['Q'])[1],shape(pwmats['Q'])[0]))
b_pwmat=a_pwmat.copy()
for ii,ee in enumerate(1-epsilon):
a_pwmat[ii,:,:]=(ee*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['PWS'].T*((S['PWS']+PWS_Smat)/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
b_pwmat[ii,:,:]=((1-ee)*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['DWS'].T*(S['DWS']/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)
c_pwmat=1-a_pwmat-b_pwmat
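# by construction a + b + c = 1, so c_pwmat is the (FW + SI) fraction carried
# by neither PW limb (presumably exported with AWN)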
PWN_Smat[10,10]
PWS_Smat[10,10]
PWN_Smat[5,5]
PWS_Smat[5,5]
epsilon=arange(0,1.1,0.1)
fwcol='#43a2ca'
ash='d'
def plot_adep_pw():
f,axx=subplots(1,2,figsize=(11,3.2),sharex=True)
f.subplots_adjust(wspace=0.3)
for ii,var in enumerate([a_pwmat,b_pwmat]):
if ii==0:
xvar=(1-epsilon)
xvar2=1
xvar3=0
else:
xvar=epsilon
xvar2=0
xvar3=1
axx[ii].plot(xvar*Ubase['PWN'],var[:,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,linewidth=3,color='k',label='Base case',zorder=5)
axx[ii].plot(xvar*U_pw['PWN'],var[:,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,color='purple',zorder=4,label='Polar Waters fresher by 0.5',linewidth=3)
axx[ii].plot(xvar2*Ubase['PWN'],var[0,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,'o',color='k',label='',zorder=5)
axx[ii].plot(xvar2*U_pw['PWN'],var[0,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,'o',color='purple',zorder=4,label='')
axx[ii].plot(xvar3*Ubase['PWN'],var[-1,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,ash,color='k',label='',zorder=5)
axx[ii].plot(xvar3*U_pw['PWN'],var[-1,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,ash,color='purple',zorder=4,label='')
axx[ii].set_ylim(-30,140)
axx[0].plot((1-epsilon)*U_fwvar['PWN'],a_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol,label='Add 20 mSv of Fresh Water')
axx[1].plot(epsilon*U_fwvar['PWN'],b_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol)
axx[0].plot(U_fwvar['PWN'],a_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
axx[1].plot(0,b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')
axx[0].plot(0,a_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
axx[1].plot(U_fwvar['PWN'],b_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')
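    # hard-coded stars mark where each scenario reaches the 56/37 mSv
    # reference levels drawn as dashed lines below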
axx[0].plot(0.5,56,'*',color='k',label='',markersize=10)
axx[0].plot(1.1,56,'*',color='purple',label='',markersize=10)
axx[1].plot(1.3,37,'*',color='k',label='',markersize=10)
axx[1].plot(1,37,'*',color='purple',label='',markersize=10)
# axx[1].plot(U_fwvar['PWN'],b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'s',color='k',label='')
# axx[0].plot(1-epsilon,a_both,linewidth=3,color='g',label='Both')
# axx[1].plot(1-epsilon,b_both,linewidth=3,color='g')
axx[0].legend(loc=(0.05,-0.5),ncol=3,fontsize=12)
axx[0].set_title('a) Estuarine limb',fontsize=14)
axx[1].set_title('b) Overturning limb',fontsize=14)
    axx[0].set_ylabel('$\\mathbf{\\delta}\\ U_{FW}$\nFW transport in $\\mathbf{PWS}$ [mSv]')
    axx[1].set_ylabel('$\\mathbf{\\gamma}\\ U_{FW}$\nFW transport in $\\mathbf{DWS}$ [mSv]')
    axx[0].set_xlabel('$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\nPWN transport in $\\mathbf{PWS}$ [Sv]')
    axx[1].set_xlabel('$\\mathbf{\\epsilon} \\ U_{PWN}$\nPWN transport in $\\mathbf{DWS}$ [Sv]')
for axi in axx[0],axx[1]:
axi.axhline(0,color='k')
axi.set_xlim(-0.05,2.2)
axx[0].axhline(56,color='k',linestyle='--')
axx[1].axhline(37,color='k',linestyle='--')
savefig(figdir_paper+'/FWfrac_obs_pwdep.png',bbox_inches='tight')
savefig(figdir_paper+'/FWfrac_obs_pwdep.pdf',bbox_inches='tight')
plot_adep_pw()
def get_PWN_from_FW(x2,y1,y2,y3):
x3=(y3-y1)*x2/(y2-y1)
return x3
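# get_PWN_from_FW inverts the straight line through (0, y1) and (x2, y2) to
# find the x where it reaches y3, i.e. the PWN transport at which a limb's FW
# transport hits a target value (used below with a 50 mSv target)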
x3_base_PWS=get_PWN_from_FW(Ubase['PWN'],(Ubase['FW']+Ubase['SI'])*a_pwmat[-1,10,10]*1e3,(Ubase['FW']+Ubase['SI'])*a_pwmat[0,10,10]*1e3,50)
x3_base_PWS
Ubase['PWN']
1-x3_base_PWS/Ubase['PWN']
x3_fresh_PWS=get_PWN_from_FW(U_pw['PWN'],(U_pw['FW']+U_pw['SI'])*a_pwmat[-1,5,5]*1e3,(U_pw['FW']+U_pw['SI'])*a_pwmat[0,5,5]*1e3,50)
x3_fresh_PWS
U_pw['PWN']
def get_AWS_from_PWN(Uvar,Svar,eps):
alpha_U=-(Uvar['PWS']*Svar['PWS']+(1-eps)*Uvar['PWN']*Svar['PWN'])/Svar['AWS']
beta_U=-(Uvar['DWS']*Svar['DWS']+eps*Uvar['PWN']*Svar['PWN'])/Svar['AWS']
return alpha_U,beta_U
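# get_AWS_from_PWN returns the AWS transport each limb must entrain to close
# its salt budget for a given eps split of PWN: alpha_U for the estuarine
# (PWS) limb, beta_U for the overturning (DWS) limb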
get_AWS_from_PWN(Ubase,S,0.65)
get_AWS_from_PWN(U_pw,S_PW,0.65)
############################graveyard
# def plot_in_each(axi):
# axi.plot(S['PWN'],S['PWS'],'ko',markersize=10)
# axi.plot(S['PWN']+PWN_Svec,S['PWN']+PWN_Svec,'r-',linewidth=3)
#
# def plot_PW_Sdep(Svec,Tvec,simats):
# f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# if (kk=='FW') | (kk=='SI'):
# climi=20
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [mSv]')
# cbar=colorbar(contit,ax=axi,format='%1.0f')
# plot_in_each(axi)
# elif kk=='Q':
# climi=30
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,cp*rhow*(pwmats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [TW]')
# cbar=colorbar(contit,ax=axi,format='%2.0f')
# plot_in_each(axi)
# else:
# climi=1.5
# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)
# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')
# axi.set_title(kk+' [Sv]')
# cbar=colorbar(contit,ax=axi,format='%0.2f')
# plot_in_each(axi)
# for label in cbar.ax.yaxis.get_ticklabels()[1::2]:
# label.set_visible(False)
# axi.set_ylim(S['PWS']+PWS_Svec[0],S['PWS']+PWS_Svec[-1])
# f.text(0.5, 0, 'PWN salinity', ha='center',fontsize=14)
# f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# savefig(figdir_paper+'_extra_2004/PW_Sdep.png',bbox_inches='tight')
# savefig(figdir_paper+'_extra_2004/PW_Sdep.pdf',bbox_inches='tight')
#
#
# plot_PW_Sdep(PWN_Svec,PWS_Svec,pwmats)
# def plot_PW_Sdep_lines():
# f,axx=subplots(2,4,figsize=(15,6),sharex=True)
# axivec=array([])
# for axirow in axx:
# for axi in axirow:
# axivec=hstack((axivec,axi))
# for axi,kk in zip(axivec,simats):
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[-2,:],(pwmats[kk].T[-2,:]),label='vary PWN salinity')
# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[:,-3],(pwmats[kk].T[:,-3]),label='vary PWS salinity')
# axi.plot(((S['PWN'])-(S['PWS'])),(Ubase[kk]),'ko',label='base case')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[5,5]),'ro',label='both 0.5 fresher')
# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[0,0]),'go',label='both 1 fresher')
# axi.set_title(kk)
# axi.legend(loc=(1,0.7))
# f.text(0.5, 0, 'PWN salinity - PWS salinity', ha='center',fontsize=14)
# # f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)
#
# # savefig(figdir_paper+'/PW_Sdep.png',bbox_inches='tight')
# # savefig(figdir_paper+'/PW_Sdep.pdf',bbox_inches='tight')
#
# plot_PW_Sdep_lines()
# Ubase.keys()
|
normal
|
{
"blob_id": "40b94a3be27ebb0d8e3e67fddabe1dc68646169c",
"index": 9881,
"step-1": "<mask token>\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\n<mask token>\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\n<mask token>\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\n<mask token>\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + 
Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\n<mask token>\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\n<mask token>\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\n<mask token>\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', 
zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\n<mask token>\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\n<mask token>\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\n<mask token>\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + 
Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\n<mask token>\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\n<mask token>\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\n<mask token>\n\n\ndef lineplot_PW_salinity():\n f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)\n xind = -1\n yind = -1\n svr = len(PWS_Svec)\n xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],\n [(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]\n ufw_tot = -Ubase['SI'] - Ubase['FW']\n yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,\n pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(\n [(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in\n range(svr)])]\n yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -\n Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in\n range(svr)])]\n xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']\n titvec = ['a) Vary PWN salinity\\n\\nPWS = 34.4',\n 'b) Vary PWS salinity\\n\\nPWN = 33.7', 'c) Vary both PW salinities']\n lw = 2\n for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:\n axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - 
Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for\n ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)\n for ii in range(3):\n ax1 = axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)\n ax2 = axx[ii].twinx()\n ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=\n 'limegreen', linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10, 10)\n ax2.set_ylim(-40, 40)\n axx[ii].set_title(titvec[ii], fontweight='bold')\n if ii != 2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])\n axx[0].set_ylim(-1.5, 1.5)\n axx[0].set_yticks(arange(-1, 1.1, 0.5))\n ax2.spines['right'].set_position(('axes', 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n axi2 = axx[2].twiny()\n axi2.set_xticks(arange(32.8, 33.8, 0.2))\n axi2.set_xlim(xvar[0][0], xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4 - 0.5, color='k', zorder=0)\n savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')\n savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n 
axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\n<mask token>\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\n<mask token>\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\n<mask token>\nAM[zz].dot(x0[zz])\n16 / 35\n1.5 / 10\n<mask token>\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\n<mask token>\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\nplot_base_case_simple(Ubase, Ue, 'base')\nU\nUbase['SI'] + 
Ubase['FW']\nUbase['Q'] * cp * rhow / 1000000.0\n<mask token>\nbasediff\nplot_base_case_simple(Umb_sol, Umb_err, 'mb')\n[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]\n<mask token>\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\nS['PWN'] / S['AWS']\nS['PWS'] / S['AWS']\nS['DWS'] / S['AWS']\nUbase['PWS']\nUbase['DWS']\nUbase['PWN'] * (S['PWN'] / S['AWS'] - 1)\nUbase['PWS'] * (S['PWS'] / S['AWS'] - 1)\nUbase['DWS'] * (S['DWS'] / S['AWS'] - 1)\nUbase['FW'] + Ubase['SI']\n<mask token>\n[(kk, S[kk] - S_mb[kk]) for kk in S]\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\nplot_adep()\n<mask token>\nfor S_SI in range(0, 10, 2):\n sivar[S_SI] = {}\n for T_SI in range(-90, 5, 10):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n sivar[S_SI][T_SI] = xbase + xsol_Ad\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\n<mask token>\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n 
savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\nplot_SIresponse()\ncontourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])\ncolorbar()\n<mask token>\nfor S_PWNa in arange(-1, 0.05, 0.1):\n pwsvar[S_PWNa] = {}\n for S_PWSa in arange(-1.0, 0.05, 0.1):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],\n S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T\n ['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[\n 'SI'], 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad\n<mask token>\n(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0\nU_pw['FW'] + U_pw['SI']\nUbase['FW'] + Ubase['SI']\nU_si\n[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]\n[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0\nU_pw['Q'] * cp * rhow / 1000000.0\n\n\ndef lineplot_PW_salinity():\n f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)\n xind = -1\n yind = -1\n svr = len(PWS_Svec)\n xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],\n [(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]\n ufw_tot = -Ubase['SI'] - Ubase['FW']\n yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + ufw_tot,\n pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(\n [(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in\n range(svr)])]\n yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -\n Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in\n range(svr)])]\n xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']\n titvec = ['a) Vary PWN salinity\\n\\nPWS = 34.4',\n 'b) Vary PWS salinity\\n\\nPWN = 33.7', 'c) Vary both PW salinities']\n lw = 2\n for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:\n axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for\n ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)\n for ii in range(3):\n ax1 = axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)\n ax2 = axx[ii].twinx()\n ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=\n 'limegreen', linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10, 10)\n ax2.set_ylim(-40, 40)\n axx[ii].set_title(titvec[ii], fontweight='bold')\n if ii != 2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])\n axx[0].set_ylim(-1.5, 1.5)\n axx[0].set_yticks(arange(-1, 1.1, 0.5))\n ax2.spines['right'].set_position(('axes', 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)\n for line in 
leg.get_lines():\n line.set_linewidth(4.0)\n axi2 = axx[2].twiny()\n axi2.set_xticks(arange(32.8, 33.8, 0.2))\n axi2.set_xlim(xvar[0][0], xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4 - 0.5, color='k', zorder=0)\n savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')\n savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')\n\n\nlineplot_PW_salinity()\n37 / (56 + 37 + 5)\nUbase['FW']\nUbase['SI']\n<mask token>\nfor U_FW in arange(0, 0.11, 0.01):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[\n 'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\n xinit = xbase.copy()\n xinit[5] = xinit[5] + U_FW\n dv = -AM.dot(xinit)\n Evec = xinit / 5\n Evec[5:7] = 1e-10\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n fwvar[U_FW] = xinit + xsol_Ad\n<mask token>\nU['FW'] + U['SI']\nUbase['FW'] + Ubase['SI'] + 0.05\nU_fwvar['FW'] + U_fwvar['SI']\nU_fwvar['Q'] * cp * rhow / 1000000.0\nU_fwvar\n<mask token>\nfor ii, ee in enumerate(1 - epsilon):\n a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[\n 'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)\n ) / (pwmats['FW'].T + pwmats['SI'].T)\n b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat\n ) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (\n pwmats['FW'].T + pwmats['SI'].T)\n<mask token>\nPWN_Smat[10, 10]\nPWS_Smat[10, 10]\nPWN_Smat[5, 5]\nPWS_Smat[5, 5]\n<mask token>\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n 
axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\nplot_adep_pw()\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\n<mask token>\nx3_base_PWS\nUbase['PWN']\n1 - x3_base_PWS / Ubase['PWN']\n<mask token>\nx3_fresh_PWS\nU_pw['PWN']\n\n\ndef get_AWS_from_PWN(Uvar, Svar, eps):\n alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[\n 'PWN']) / Svar['AWS']\n beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']\n ) / Svar['AWS']\n return alpha_U, beta_U\n\n\nget_AWS_from_PWN(Ubase, S, 0.65)\nget_AWS_from_PWN(U_pw, S_PW, 0.65)\n",
"step-4": "<mask token>\nfigdir = (\n '/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'\n )\nfigdir_paper = (\n '/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'\n )\nWM = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_2008.nc')\nWM_mb = xr.open_dataset(datadir + 'FW_WM/OSNAP2014-18_WM_mb_2008.nc')\ncp = 3850\nrhow = 1025\ntera = 10 ** 12\nQ = -251 * tera / rhow / cp / 1000000.0\n\n\ndef get_U_S_T_from_WM(WM):\n U = {}\n S = {}\n T = {}\n for wm in WM.WM:\n U[str(wm.values)] = float(WM['TRANS'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)] = float(WM['PSAL'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)] = float(WM['PTMP'].sel(WM=wm).groupby(\n 'TIME.month').mean('TIME').mean(dim='month').values)\n U['SI'] = 0.073\n U['FW'] = 0.028\n U['Q'] = Q\n S['SI'] = 0\n S['FW'] = 0\n T['SI'] = 0\n T['FW'] = 0\n T['Q'] = 1\n return U, S, T\n\n\nU, S, T = get_U_S_T_from_WM(WM)\nU_mb, S_mb, T_mb = get_U_S_T_from_WM(WM_mb)\n\n\ndef get_U_from_x(x):\n U = {}\n U['PWS'] = x[0]\n U['AWS'] = x[1]\n U['DWS'] = x[2]\n U['PWN'] = x[3]\n U['AWN'] = x[4]\n U['FW'] = x[5]\n U['SI'] = x[6]\n U['Q'] = x[7]\n return U\n\n\nAM = {}\nx0 = {}\nAM['base'] = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T['DWS'],\n T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\nx0['base'] = [U['PWS'], U['AWS'], U['DWS'], U['PWN'], U['AWN'], U['FW'], U[\n 'SI'], U['Q']]\nAM['massbal'] = array([[1, 1, 1, 0, 0, 0.5, 0.5, 0], [0, 0, 0, 1, 1, 0.5, \n 0.5, 0], [S_mb['PWS'], S_mb['AWS'], S_mb['DWS'], S_mb['PWN'], S_mb[\n 'AWN'], S_mb['FW'], S_mb['SI'], 0], [T_mb['PWS'], T_mb['AWS'], T_mb[\n 'DWS'], T_mb['PWN'], T_mb['AWN'], T_mb['FW'], T_mb['SI'], 1]])\nx0['massbal'] = [U_mb['PWS'], U_mb['AWS'], U_mb['DWS'], U_mb['PWN'], U_mb[\n 'AWN'], U_mb['FW'], U_mb['SI'], U_mb['Q']]\nzz = 'base'\nAM[zz].dot(x0[zz])\n16 / 35\n1.5 / 10\nSnorm = 35\nTnorm = 5\n\n\ndef run_inverse_model(zz, U, S, T):\n dv = -AM[zz].dot(x0[zz])\n if zz == 'base':\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n elif zz == 'massbal':\n Winv = diag([1, 1, 1 / Snorm, 1 / Tnorm])\n Evec = array([(xx / 5) for xx in x0[zz]])\n E = diag(Evec)\n Umat, D, VmatT = linalg.svd(Winv.dot(AM[zz].dot(E)))\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n xbase = x0[zz] + xsol_Ad\n P = diag(E - E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T)) +\n linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase = get_U_from_x(xbase)\n Ue = get_U_from_x(P)\n return Ubase, Ue, xbase\n\n\nUbase, Ue, xbase = run_inverse_model('base', U, S, T)\nUmb_sol, Umb_err, xmb = run_inverse_model('massbal', U_mb, S_mb, T_mb)\ncoldic = {'AWS': 'red', 'DWS': 'grey', 'PWS': 'royalblue', 'PWN': 'purple',\n 'AWN': 'orange', 'SI': 'cyan', 'FW': 'cyan', 'Q': 'limegreen'}\n\n\ndef plot_base_case_simple(Ubase, Ue, plt):\n f, axx = subplots(1, 4, figsize=(9, 2.5), constrained_layout=True,\n gridspec_kw=dict(width_ratios=[2, 3, 1, 1]))\n alf = 0.75\n capi = 7\n axx[0].bar(range(2), [Ubase[kk] for kk in ['AWS', 'DWS']], color=[\n coldic[kk] for kk in ['AWS', 'DWS']], yerr=[Ue[kk] for kk in ['AWS',\n 'DWS']], capsize=capi, alpha=alf)\n axx[0].plot(range(2), [U[kk] for kk in ['AWS', 'DWS']], 'o', color='k')\n ylimi = 20\n 
axx[0].set_ylim(-ylimi, ylimi)\n ylimi = 4\n axx[1].set_ylim(-ylimi, ylimi)\n axx[1].bar(range(3), [Ubase[kk] for kk in ['PWS', 'PWN', 'AWN']], color\n =[coldic[kk] for kk in ['PWS', 'PWN', 'AWN']], yerr=[Ue[kk] for kk in\n ['PWS', 'PWN', 'AWN']], capsize=capi, alpha=alf)\n axx[1].plot(range(3), [U[kk] for kk in ['PWS', 'PWN', 'AWN']], 'o',\n color='k')\n axx[2].bar(range(1), U['SI'] + Ubase['FW'], color=coldic['FW'], yerr=Ue\n ['SI'] + Ue['FW'], capsize=capi, alpha=alf)\n axx[2].plot(range(1), U['SI'] + U['FW'], 'o', color='k')\n fwlim = 0.2\n axx[2].set_ylim(-fwlim, fwlim)\n fsz = 14\n axx[0].set_ylabel('Volume transport [Sv]', fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]', fontsize=fsz)\n axx[3].bar(0, cp * rhow * Ubase['Q'] / 1000000.0, color=coldic['Q'],\n yerr=cp * rhow * Ue['Q'] / 1000000.0, capsize=capi, alpha=alf)\n axx[3].plot(0, cp * rhow * U['Q'] / 1000000.0, 'o', color='k')\n for ii in range(3):\n axx[ii].axhline(0, color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS', 'DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS', 'PWN', 'AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.png',\n bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/InvBudSol_' + plt + '.pdf',\n bbox_inches='tight')\n\n\nplot_base_case_simple(Ubase, Ue, 'base')\nU\nUbase['SI'] + Ubase['FW']\nUbase['Q'] * cp * rhow / 1000000.0\nbasediff = [(kk, Ubase[kk] - U[kk]) for kk in Ubase]\nbasediff\nplot_base_case_simple(Umb_sol, Umb_err, 'mb')\n[(kk, Umb_sol[kk] - U_mb[kk]) for kk in Ubase]\nepsilon = arange(0, 1.1, 0.1)\n\n\ndef get_a_b_fracs(Ubase, S):\n a = ((1 - epsilon) * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase[\n 'PWS'] * (S['PWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n b = (epsilon * Ubase['PWN'] * (S['PWN'] / S['AWS'] - 1) + Ubase['DWS'] *\n (S['DWS'] / S['AWS'] - 1)) / (Ubase['FW'] + Ubase['SI'])\n return a, b\n\n\nS['PWN'] / S['AWS']\nS['PWS'] / S['AWS']\nS['DWS'] / S['AWS']\nUbase['PWS']\nUbase['DWS']\nUbase['PWN'] * (S['PWN'] / S['AWS'] - 1)\nUbase['PWS'] * (S['PWS'] / S['AWS'] - 1)\nUbase['DWS'] * (S['DWS'] / S['AWS'] - 1)\nUbase['FW'] + Ubase['SI']\na = {}\nb = {}\na['base'], b['base'] = get_a_b_fracs(Ubase, S)\na['mb'], b['mb'] = get_a_b_fracs(Umb_sol, S_mb)\n[(kk, S[kk] - S_mb[kk]) for kk in S]\n\n\ndef plot_adep():\n for ii, kk in enumerate(a):\n plot(1 - epsilon, a[kk], linewidth=3, label=kk, color='C' + str(ii))\n xlabel('$\\\\mathbf{1-\\\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0, 1)\n axhline(0, color='k')\n legend()\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.png', bbox_inches='tight')\n savefig(figdir_paper + '_extra_2004/FWfrac_mbdep.pdf', bbox_inches='tight')\n\n\nplot_adep()\nsivar = {}\nfor S_SI in range(0, 10, 2):\n sivar[S_SI] = {}\n for T_SI in range(-90, 5, 10):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'],\n S['PWN'], S['AWN'], S['FW'], S_SI, 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T_SI, 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = 
E.dot(xsol_prime)\n sivar[S_SI][T_SI] = xbase + xsol_Ad\n\n\ndef get_mats_from_dic(sivar):\n Svec = array([float(ff) for ff in sivar])\n Tvec = array([float(ff) for ff in sivar[Svec[0]]])\n simats = {}\n for QQ, kk in enumerate(Ubase):\n simats[kk] = zeros((len(Svec), len(Tvec)))\n for ii, ss in enumerate(Svec):\n for jj, tt in enumerate(Tvec):\n simats[kk][ii, jj] = sivar[ss][tt][QQ]\n return Svec, Tvec, simats\n\n\nSvec, Tvec, simats = get_mats_from_dic(sivar)\n\n\ndef plot_SIresponse():\n f, axx = subplots(2, 4, figsize=(15, 6), sharex=True, sharey=True)\n axivec = array([])\n for axirow in axx:\n for axi in axirow:\n axivec = hstack((axivec, axi))\n for axi, kk in zip(axivec, simats):\n if (kk == 'FW') | (kk == 'SI'):\n climi = 10\n contit = axi.contourf(Svec, Tvec, (simats[kk].T - Ubase[kk]) * \n 1000.0, vmin=-climi, vmax=climi, cmap=cm.RdBu)\n axi.set_title(kk + ' [mSv]')\n cbar = colorbar(contit, ax=axi, format='%1.0f')\n elif kk == 'Q':\n climi = 30\n contit = axi.contourf(Svec, Tvec, cp * rhow * (simats['Q'].T -\n Ubase['Q']) / 1000000.0, vmin=-climi, vmax=climi, cmap=cm.\n PiYG_r)\n axi.set_title(kk + ' [TW]')\n cbar = colorbar(contit, ax=axi, format='%2.0f')\n else:\n climi = 0.3\n contit = axi.contourf(Svec, Tvec, simats[kk].T - Ubase[kk],\n vmin=-climi, vmax=climi, cmap=cm.PuOr_r)\n axi.set_title(kk + ' [Sv]')\n cbar = colorbar(contit, ax=axi, format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n f.text(0.5, 0, 'sea ice salinity', ha='center', fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\\\circ$C]', va=\n 'center', rotation='vertical', fontsize=14)\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.png', bbox_inches=\n 'tight')\n savefig(figdir_paper + '_extra_2004/SeaIce_paramdep.pdf', bbox_inches=\n 'tight')\n\n\nplot_SIresponse()\ncontourf(simats['AWN'].T - Ubase['AWN'] + simats['PWN'].T - Ubase['PWN'])\ncolorbar()\npwsvar = {}\nfor S_PWNa in arange(-1, 0.05, 0.1):\n pwsvar[S_PWNa] = {}\n for S_PWSa in arange(-1.0, 0.05, 0.1):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] + S_PWSa, S['AWS'],\n S['DWS'], S['PWN'] + S_PWNa, S['AWN'], S['FW'], S['SI'], 0], [T\n ['PWS'], T['AWS'], T['DWS'], T['PWN'], T['AWN'], T['FW'], T[\n 'SI'], 1]])\n dv = -AM.dot(xbase)\n Evec = array(hstack(([1] * 5, xbase[-3:] / 5)))\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n pwsvar[S_PWNa][S_PWSa] = xbase + xsol_Ad\nPWN_Svec, PWS_Svec, pwmats = get_mats_from_dic(pwsvar)\nPWN_Smat, PWS_Smat = meshgrid(PWN_Svec, PWS_Svec)\nU_si = get_U_from_x(sivar[0][-30])\nU_pw = get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])\n(U_pw['FW'] + U_pw['SI'] - (Ubase['FW'] + Ubase['SI'])) * 1000.0\nU_pw['FW'] + U_pw['SI']\nUbase['FW'] + Ubase['SI']\nU_si\n[(kk, U_si[kk] - Ubase[kk]) for kk in Ubase]\n[(U_si[kk] - Ubase[kk]) for kk in Ubase][-1] * cp * rhow / 1000000.0\nU_pw['Q'] * cp * rhow / 1000000.0\n\n\ndef lineplot_PW_salinity():\n f, axx = subplots(1, 3, figsize=(11, 3), sharey=True)\n xind = -1\n yind = -1\n svr = len(PWS_Svec)\n xvar = [(S['PWN'] + PWN_Smat)[xind, :], (S['PWS'] + PWS_Smat)[:, yind],\n [(S['PWS'] + PWS_Smat)[ii, ii] for ii in range(svr)]]\n ufw_tot = -Ubase['SI'] - Ubase['FW']\n yvar_fw = [pwmats['FW'].T[xind, :] + pwmats['SI'].T[xind, :] + 
ufw_tot,\n pwmats['FW'].T[:, yind] + pwmats['SI'].T[:, yind] + ufw_tot, array(\n [(pwmats['FW'].T[ii, ii] + pwmats['SI'].T[ii, ii] + ufw_tot) for ii in\n range(svr)])]\n yvar_Q = [pwmats['Q'].T[xind, :] - Ubase['Q'], pwmats['Q'].T[:, yind] -\n Ubase['Q'], array([(pwmats['Q'].T[ii, ii] - Ubase['Q']) for ii in\n range(svr)])]\n xlab = ['PWN salinity', 'PWS salinity', 'PWS salinity']\n titvec = ['a) Vary PWN salinity\\n\\nPWS = 34.4',\n 'b) Vary PWS salinity\\n\\nPWN = 33.7', 'c) Vary both PW salinities']\n lw = 2\n for kk in ['AWS', 'PWS', 'DWS', 'AWN', 'PWN']:\n axx[0].plot(xvar[0], pwmats[kk].T[xind, :] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[1].plot(xvar[1], pwmats[kk].T[:, yind] - Ubase[kk], color=\n coldic[kk], label=kk, linewidth=lw)\n axx[2].plot(xvar[2], array([(pwmats[kk].T[ii, ii] - Ubase[kk]) for\n ii in range(svr)]), color=coldic[kk], label=kk, linewidth=lw)\n for ii in range(3):\n ax1 = axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii], yvar_fw[ii] * 1000.0, color='c', linewidth=lw)\n ax2 = axx[ii].twinx()\n ax2.plot(xvar[ii], cp * rhow * yvar_Q[ii] / 1000000.0, color=\n 'limegreen', linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10, 10)\n ax2.set_ylim(-40, 40)\n axx[ii].set_title(titvec[ii], fontweight='bold')\n if ii != 2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0], xvar[ii][-1])\n axx[0].set_ylim(-1.5, 1.5)\n axx[0].set_yticks(arange(-1, 1.1, 0.5))\n ax2.spines['right'].set_position(('axes', 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]', color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]', color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg = axx[0].legend(loc=(0.5, -0.5), ncol=5, fontsize=13)\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n axi2 = axx[2].twiny()\n axi2.set_xticks(arange(32.8, 33.8, 0.2))\n axi2.set_xlim(xvar[0][0], xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4 - 0.5, color='k', zorder=0)\n savefig(figdir_paper + '/PWS_dep.png', bbox_inches='tight')\n savefig(figdir_paper + '/PWS_dep.pdf', bbox_inches='tight')\n\n\nlineplot_PW_salinity()\n37 / (56 + 37 + 5)\nUbase['FW']\nUbase['SI']\nfwvar = {}\nfor U_FW in arange(0, 0.11, 0.01):\n AM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'], S['AWS'], S['DWS'], S[\n 'PWN'], S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\n xinit = xbase.copy()\n xinit[5] = xinit[5] + U_FW\n dv = -AM.dot(xinit)\n Evec = xinit / 5\n Evec[5:7] = 1e-10\n E = diag(Evec)\n Winv = diag([1, 1 / Snorm, 1 / Tnorm])\n Umat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\n xsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad = E.dot(xsol_prime)\n fwvar[U_FW] = xinit + xsol_Ad\nU_fwvar = get_U_from_x(fwvar[0.02])\na_fw, b_fw = get_a_b_fracs(U_fwvar, S)\nU['FW'] + U['SI']\nUbase['FW'] + Ubase['SI'] + 0.05\nU_fwvar['FW'] + U_fwvar['SI']\nU_fwvar['Q'] * cp * rhow / 1000000.0\nU_fwvar\nAM = array([[1, 1, 1, 1, 1, 1, 1, 0], [S['PWS'] - 0.5, S['AWS'], S['DWS'], \n S['PWN'] - 0.5, S['AWN'], S['FW'], S['SI'], 0], [T['PWS'], T['AWS'], T[\n 'DWS'], T['PWN'], T['AWN'], T['FW'], T['SI'], 1]])\nxinit = xbase.copy()\nxinit[5] = xinit[5] + 0.02\ndv = -AM.dot(xinit)\nEvec = xinit / 5\nEvec[5:7] = 1e-10\nE = diag(Evec)\nWinv = diag([1, 1 / Snorm, 1 / 
Tnorm])\nUmat, D, VmatT = linalg.svd(Winv.dot(AM.dot(E)))\nLambda_inv = zeros((AM.shape[0], AM.shape[1])).T\nLambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1 / D)\nxsol_prime = VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\nxsol_Ad = E.dot(xsol_prime)\nx_both = xinit + xsol_Ad\nU_both = get_U_from_x(x_both)\nS_PW = S.copy()\nS_PW['PWS'] = S['PWS'] - 0.5\nS_PW['PWN'] = S['PWN'] - 0.5\na_both, b_both = get_a_b_fracs(U_both, S_PW)\na_pwmat = zeros((len(epsilon), shape(pwmats['Q'])[1], shape(pwmats['Q'])[0]))\nb_pwmat = a_pwmat.copy()\nfor ii, ee in enumerate(1 - epsilon):\n a_pwmat[ii, :, :] = (ee * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat) / S[\n 'AWS'] - 1) + pwmats['PWS'].T * ((S['PWS'] + PWS_Smat) / S['AWS'] - 1)\n ) / (pwmats['FW'].T + pwmats['SI'].T)\n b_pwmat[ii, :, :] = ((1 - ee) * pwmats['PWN'].T * ((S['PWN'] + PWN_Smat\n ) / S['AWS'] - 1) + pwmats['DWS'].T * (S['DWS'] / S['AWS'] - 1)) / (\n pwmats['FW'].T + pwmats['SI'].T)\nc_pwmat = 1 - a_pwmat - b_pwmat\nPWN_Smat[10, 10]\nPWS_Smat[10, 10]\nPWN_Smat[5, 5]\nPWS_Smat[5, 5]\nepsilon = arange(0, 1.1, 0.1)\nfwcol = '#43a2ca'\nash = 'd'\n\n\ndef plot_adep_pw():\n f, axx = subplots(1, 2, figsize=(11, 3.2), sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii, var in enumerate([a_pwmat, b_pwmat]):\n if ii == 0:\n xvar = 1 - epsilon\n xvar2 = 1\n xvar3 = 0\n else:\n xvar = epsilon\n xvar2 = 0\n xvar3 = 1\n axx[ii].plot(xvar * Ubase['PWN'], var[:, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, linewidth=3, color='k', label=\n 'Base case', zorder=5)\n axx[ii].plot(xvar * U_pw['PWN'], var[:, 5, 5] * (U_pw['FW'] + U_pw[\n 'SI']) * 1000.0, color='purple', zorder=4, label=\n 'Polar Waters fresher by 0.5', linewidth=3)\n axx[ii].plot(xvar2 * Ubase['PWN'], var[0, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, 'o', color='k', label='', zorder=5)\n axx[ii].plot(xvar2 * U_pw['PWN'], var[0, 5, 5] * (U_pw['FW'] + U_pw\n ['SI']) * 1000.0, 'o', color='purple', zorder=4, label='')\n axx[ii].plot(xvar3 * Ubase['PWN'], var[-1, 10, 10] * (Ubase['FW'] +\n Ubase['SI']) * 1000.0, ash, color='k', label='', zorder=5)\n axx[ii].plot(xvar3 * U_pw['PWN'], var[-1, 5, 5] * (U_pw['FW'] +\n U_pw['SI']) * 1000.0, ash, color='purple', zorder=4, label='')\n axx[ii].set_ylim(-30, 140)\n axx[0].plot((1 - epsilon) * U_fwvar['PWN'], a_fw * (U_fwvar['FW'] +\n U_fwvar['SI']) * 1000.0, linewidth=3, color=fwcol, label=\n 'Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon * U_fwvar['PWN'], b_fw * (U_fwvar['FW'] + U_fwvar[\n 'SI']) * 1000.0, linewidth=3, color=fwcol)\n axx[0].plot(U_fwvar['PWN'], a_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, 'o', color=fwcol, label='')\n axx[1].plot(0, b_fw[0] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, 'o',\n color=fwcol, label='')\n axx[0].plot(0, a_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) * 1000.0, ash,\n color=fwcol, label='')\n axx[1].plot(U_fwvar['PWN'], b_fw[-1] * (U_fwvar['FW'] + U_fwvar['SI']) *\n 1000.0, ash, color=fwcol, label='')\n axx[0].plot(0.5, 56, '*', color='k', label='', markersize=10)\n axx[0].plot(1.1, 56, '*', color='purple', label='', markersize=10)\n axx[1].plot(1.3, 37, '*', color='k', label='', markersize=10)\n axx[1].plot(1, 37, '*', color='purple', label='', markersize=10)\n axx[0].legend(loc=(0.05, -0.5), ncol=3, fontsize=12)\n axx[0].set_title('a) Estuarine limb', fontsize=14)\n axx[1].set_title('b) Overturning limb', fontsize=14)\n axx[0].set_ylabel(\n '$\\\\mathbf{\\\\delta}\\\\ U_{FW}$\\nFW transport in $\\\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel(\n '$\\\\mathbf{\\\\gamma}\\\\ U_{FW}$\\nFW 
transport in $\\\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel(\n '$\\\\mathbf{(1-\\\\epsilon)} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{PWS}$ [Sv]'\n )\n axx[1].set_xlabel(\n '$\\\\mathbf{\\\\epsilon} \\\\ U_{PWN}$\\nPWN transport in $\\\\mathbf{DWS}$ [Sv]'\n )\n for axi in (axx[0], axx[1]):\n axi.axhline(0, color='k')\n axi.set_xlim(-0.05, 2.2)\n axx[0].axhline(56, color='k', linestyle='--')\n axx[1].axhline(37, color='k', linestyle='--')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.png', bbox_inches='tight')\n savefig(figdir_paper + '/FWfrac_obs_pwdep.pdf', bbox_inches='tight')\n\n\nplot_adep_pw()\n\n\ndef get_PWN_from_FW(x2, y1, y2, y3):\n x3 = (y3 - y1) * x2 / (y2 - y1)\n return x3\n\n\nx3_base_PWS = get_PWN_from_FW(Ubase['PWN'], (Ubase['FW'] + Ubase['SI']) *\n a_pwmat[-1, 10, 10] * 1000.0, (Ubase['FW'] + Ubase['SI']) * a_pwmat[0, \n 10, 10] * 1000.0, 50)\nx3_base_PWS\nUbase['PWN']\n1 - x3_base_PWS / Ubase['PWN']\nx3_fresh_PWS = get_PWN_from_FW(U_pw['PWN'], (U_pw['FW'] + U_pw['SI']) *\n a_pwmat[-1, 5, 5] * 1000.0, (U_pw['FW'] + U_pw['SI']) * a_pwmat[0, 5, 5\n ] * 1000.0, 50)\nx3_fresh_PWS\nU_pw['PWN']\n\n\ndef get_AWS_from_PWN(Uvar, Svar, eps):\n alpha_U = -(Uvar['PWS'] * Svar['PWS'] + (1 - eps) * Uvar['PWN'] * Svar[\n 'PWN']) / Svar['AWS']\n beta_U = -(Uvar['DWS'] * Svar['DWS'] + eps * Uvar['PWN'] * Svar['PWN']\n ) / Svar['AWS']\n return alpha_U, beta_U\n\n\nget_AWS_from_PWN(Ubase, S, 0.65)\nget_AWS_from_PWN(U_pw, S_PW, 0.65)\n",
"step-5": "from firstfuncs_1618 import *\n\nfigdir='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/Linear/'\nfigdir_paper='/home/isabela/Documents/projects/OSNAP/figures_OSNAPwide/Freshwater/paperfigs'\n\n########################################################################################################\n########################################################################################################\n#### Set up the optimization framework, which allows for varying almost all elements within a prescribed range\n########################################################################################################\n########################################################################################################\nWM=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_2008.nc')\nWM_mb=xr.open_dataset(datadir+'FW_WM/OSNAP2014-18_WM_mb_2008.nc')\n\ncp=3850\nrhow=1025\ntera=10**12\n#Noresm (taking sea ice into account)\nQ=-251*tera/rhow/cp/1e6 #for the Sverdrups\n\ndef get_U_S_T_from_WM(WM):\n U={}\n S={}\n T={}\n for wm in WM.WM:\n U[str(wm.values)]=float(WM['TRANS'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n S[str(wm.values)]=float(WM['PSAL'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n T[str(wm.values)]=float(WM['PTMP'].sel(WM=wm).groupby('TIME.month').mean('TIME').mean(dim='month').values)\n\n U['SI']=0.073 # NorESM fresh water input v. similar to Kwok et al. 2004 70mSv\n U['FW']=0.028 # mean E-P from JRA55\n U['Q']=Q\n S['SI']=0\n S['FW']=0\n T['SI']=0\n T['FW']=0\n T['Q']=1\n\n return U,S,T\n\nU,S,T=get_U_S_T_from_WM(WM)\nU_mb,S_mb,T_mb=get_U_S_T_from_WM(WM_mb)\n\ndef get_U_from_x(x):\n U={}\n U['PWS']=x[0]\n U['AWS']=x[1]\n U['DWS']=x[2]\n U['PWN']=x[3]\n U['AWN']=x[4]\n U['FW']=x[5]\n U['SI']=x[6]\n U['Q']=x[7]\n return U\n\nAM={}\nx0={}\n\nAM['base']=array([[1,1,1,1,1,1,1,0],\\\n[S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\\\n[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])\n\nx0['base']=[U['PWS'],U['AWS'],U['DWS'],U['PWN'],U['AWN'],U['FW'],U['SI'],U['Q']]\n\nAM['massbal']=array([[1,1,1,0,0,0.5,0.5,0],\\\n[0,0,0,1,1,0.5,0.5,0],\\\n[S_mb['PWS'],S_mb['AWS'],S_mb['DWS'],S_mb['PWN'],S_mb['AWN'],S_mb['FW'],S_mb['SI'],0],\\\n[T_mb['PWS'],T_mb['AWS'],T_mb['DWS'],T_mb['PWN'],T_mb['AWN'],T_mb['FW'],T_mb['SI'],1]])\n\nx0['massbal']=[U_mb['PWS'],U_mb['AWS'],U_mb['DWS'],U_mb['PWN'],U_mb['AWN'],U_mb['FW'],U_mb['SI'],U_mb['Q']]\n\nzz='base'\nAM[zz].dot(x0[zz])\n16/35\n1.5/10\n\n#vars that I want to be handy for later calcs\nSnorm=35\nTnorm=5\ndef run_inverse_model(zz,U,S,T):\n dv=-AM[zz].dot(x0[zz])\n\n if zz=='base':\n Winv=diag([1,1/Snorm,1/Tnorm])\n elif zz=='massbal':\n Winv=diag([1,1,1/Snorm,1/Tnorm])\n\n\n Evec=array([xx/5 for xx in x0[zz]])\n # Evec=hstack((5*[1],0.02,0.02,Qvar))\n E=diag(Evec)\n\n Umat,D,VmatT=linalg.svd(Winv.dot(AM[zz].dot(E)))\n\n Lambda_inv = zeros((AM[zz].shape[0], AM[zz].shape[1])).T\n Lambda_inv[:AM[zz].shape[0], :AM[zz].shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n xbase=x0[zz]+xsol_Ad\n P=diag(E-E.dot(AM[zz].T.dot(linalg.inv(AM[zz].dot(E.dot(AM[zz].T))+linalg.inv(Winv)).dot(AM[zz].dot(E)))))\n Ubase=get_U_from_x(xbase)\n Ue=get_U_from_x(P)\n return 
Ubase,Ue,xbase\n\nUbase,Ue,xbase=run_inverse_model('base',U,S,T)\n\nUmb_sol,Umb_err,xmb=run_inverse_model('massbal',U_mb,S_mb,T_mb)\n\ncoldic={'AWS':'red','DWS':'grey','PWS':'royalblue','PWN':'purple','AWN':'orange','SI':'cyan','FW':'cyan','Q':'limegreen'}\n\ndef plot_base_case_simple(Ubase,Ue,plt):\n f,axx=subplots(1,4,figsize=(9,2.5),constrained_layout=True,gridspec_kw=dict(width_ratios=[2,3,1,1]))\n\n alf=0.75\n capi=7\n #U\n axx[0].bar(range(2),[Ubase[kk] for kk in ['AWS','DWS']],color=[coldic[kk] for kk in ['AWS','DWS']],yerr=[Ue[kk] for kk in ['AWS','DWS']],capsize=capi,alpha=alf)\n axx[0].plot(range(2),[U[kk] for kk in ['AWS','DWS']],'o',color='k')\n\n ylimi=20\n axx[0].set_ylim(-ylimi,ylimi)\n ylimi=4\n axx[1].set_ylim(-ylimi,ylimi)\n axx[1].bar(range(3),[Ubase[kk] for kk in ['PWS','PWN','AWN']],color=[coldic[kk] for kk in ['PWS','PWN','AWN']],yerr=[Ue[kk] for kk in ['PWS','PWN','AWN']],capsize=capi,alpha=alf)\n axx[1].plot(range(3),[U[kk] for kk in ['PWS','PWN','AWN']],'o',color='k')\n\n axx[2].bar(range(1),U['SI']+Ubase['FW'],color=coldic['FW'],yerr=Ue['SI']+Ue['FW'],capsize=capi,alpha=alf)\n axx[2].plot(range(1),U['SI']+U['FW'],'o',color='k')\n fwlim=0.2\n axx[2].set_ylim(-fwlim,fwlim)\n\n fsz=14\n axx[0].set_ylabel('Volume transport [Sv]',fontsize=fsz)\n axx[3].set_ylabel('Heat flux [TW]',fontsize=fsz)\n axx[3].bar(0,cp*rhow*(Ubase['Q'])/1e6,color=coldic['Q'],yerr=cp*rhow*Ue['Q']/1e6,capsize=capi,alpha=alf)\n axx[3].plot(0,cp*rhow*(U['Q'])/1e6,'o',color='k')\n\n for ii in range(3):\n axx[ii].axhline(0,color='k')\n axx[0].set_xticks(range(2))\n axx[0].set_xticklabels(['AWS','DWS'])\n axx[1].set_xticks(range(3))\n axx[1].set_xticklabels(['PWS','PWN','AWN'])\n axx[2].set_xticks(range(1))\n axx[2].set_xticklabels(['FW'])\n axx[3].set_xticks([0])\n axx[3].set_xticklabels('Q')\n\n savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.png',bbox_inches='tight')\n savefig(figdir_paper+'_extra_2004/InvBudSol_'+plt+'.pdf',bbox_inches='tight')\n\nplot_base_case_simple(Ubase,Ue,'base')\n\nU\n\nUbase['SI']+Ubase['FW']\n\nUbase['Q']*cp*rhow/1e6\n\nbasediff=[(kk,Ubase[kk]-U[kk]) for kk in Ubase]\nbasediff\n\nplot_base_case_simple(Umb_sol,Umb_err,'mb')\n[(kk,Umb_sol[kk]-U_mb[kk]) for kk in Ubase]\n##################################################################################\n# Calculate fraction of fresh water vs. 
other water masses that goes into each limb\n#################################################################################\n#fraction of PWN in DWS limb\nepsilon=arange(0,1.1,0.1)\n\ndef get_a_b_fracs(Ubase,S):\n #fraction of FW in PWS, as a function of epsilon\n a=((1-epsilon)*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['PWS']*(S['PWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])\n #fraction of FW in DWS, as a function of epsilon\n b=(epsilon*Ubase['PWN']*(S['PWN']/S['AWS']-1)+Ubase['DWS']*(S['DWS']/S['AWS']-1))/(Ubase['FW']+Ubase['SI'])\n return a,b\n\n\n\nS['PWN']/S['AWS']\nS['PWS']/S['AWS']\nS['DWS']/S['AWS']\n\nUbase['PWS']\nUbase['DWS']\nUbase['PWN']*(S['PWN']/S['AWS']-1)\nUbase['PWS']*(S['PWS']/S['AWS']-1)\nUbase['DWS']*(S['DWS']/S['AWS']-1)\n\n(Ubase['FW']+Ubase['SI'])\n\na={}\nb={}\na['base'],b['base']=get_a_b_fracs(Ubase,S)\na['mb'],b['mb']=get_a_b_fracs(Umb_sol,S_mb)\n[(kk,S[kk]-S_mb[kk]) for kk in S]\ndef plot_adep():\n for ii,kk in enumerate(a):\n plot(1-epsilon,a[kk],linewidth=3,label=kk,color='C'+str(ii))\n\n xlabel('$\\mathbf{1-\\epsilon}$\\nfraction of PWN in PWS')\n ylabel('$\\mathbf{a}$\\n fraction of (FW + SI) in PWS')\n xlim(0,1)\n axhline(0,color='k')\n legend()\n savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.png',bbox_inches='tight')\n savefig(figdir_paper+'_extra_2004/FWfrac_mbdep.pdf',bbox_inches='tight')\n\n\nplot_adep()\n\n#################################################################################\n##### Look into how much Sea ice properties matter\n#################################################################################\nsivar={}\nfor S_SI in range(0,10,2):\n sivar[S_SI]={}\n for T_SI in range(-90,5,10):\n AM=array([[1,1,1,1,1,1,1,0],\\\n [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S_SI,0],\\\n [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T_SI,1]])\n\n dv=-AM.dot(xbase)\n\n Evec=array(hstack(([1]*5,xbase[-3:]/5)))\n E=diag(Evec)\n Winv=diag([1,1/Snorm,1/Tnorm])\n Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))\n\n\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n sivar[S_SI][T_SI]=xbase+xsol_Ad\n\ndef get_mats_from_dic(sivar):\n Svec=array([float(ff) for ff in sivar])\n Tvec=array([float(ff) for ff in sivar[Svec[0]]])\n simats={}\n for QQ,kk in enumerate(Ubase):\n simats[kk]=zeros((len(Svec),len(Tvec)))\n for ii,ss in enumerate(Svec):\n for jj,tt in enumerate(Tvec):\n simats[kk][ii,jj]=sivar[ss][tt][QQ]\n return Svec,Tvec,simats\n\nSvec,Tvec,simats=get_mats_from_dic(sivar)\n\ndef plot_SIresponse():\n f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)\n axivec=array([])\n for axirow in axx:\n for axi in axirow:\n axivec=hstack((axivec,axi))\n for axi,kk in zip(axivec,simats):\n if (kk=='FW') | (kk=='SI'):\n climi=10\n contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)\n axi.set_title(kk+' [mSv]')\n cbar=colorbar(contit,ax=axi,format='%1.0f')\n elif kk=='Q':\n climi=30\n contit=axi.contourf(Svec,Tvec,cp*rhow*(simats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)\n axi.set_title(kk+' [TW]')\n cbar=colorbar(contit,ax=axi,format='%2.0f')\n else:\n climi=0.3\n contit=axi.contourf(Svec,Tvec,(simats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)\n axi.set_title(kk+' [Sv]')\n cbar=colorbar(contit,ax=axi,format='%0.2f')\n for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n label.set_visible(False)\n\n f.text(0.5, 0, 
'sea ice salinity', ha='center',fontsize=14)\n f.text(0.05, 0.5, 'effective sea ice temperature [$^\\circ$C]', va='center',rotation='vertical',fontsize=14)\n\n savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.png',bbox_inches='tight')\n savefig(figdir_paper+'_extra_2004/SeaIce_paramdep.pdf',bbox_inches='tight')\n\nplot_SIresponse()\n\ncontourf(simats['AWN'].T-Ubase['AWN']+simats['PWN'].T-Ubase['PWN'])\ncolorbar()\n\n#################################################################################\n##### Test dependence on PW salinity (both north and south)\n#################################################################################\n\npwsvar={}\nfor S_PWNa in arange(-1,0.05,0.1):\n pwsvar[S_PWNa]={}\n for S_PWSa in arange(-1.0,0.05,0.1):\n AM=array([[1,1,1,1,1,1,1,0],\\\n [S['PWS']+S_PWSa,S['AWS'],S['DWS'],S['PWN']+S_PWNa,S['AWN'],S['FW'],S['SI'],0],\\\n [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])\n\n dv=-AM.dot(xbase)\n\n Evec=array(hstack(([1]*5,xbase[-3:]/5)))\n E=diag(Evec)\n Winv=diag([1,1/Snorm,1/Tnorm])\n Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))\n\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n pwsvar[S_PWNa][S_PWSa]=xbase+xsol_Ad\n\nPWN_Svec,PWS_Svec,pwmats=get_mats_from_dic(pwsvar)\n\n\n####################################################################################################\n######## Response is pretty uniform: try to tease out a pattern (and look at other deps?) #######\n##################################################################################################\nPWN_Smat,PWS_Smat=meshgrid(PWN_Svec,PWS_Svec)\n\n\nU_si=get_U_from_x(sivar[0][-30])\n\nU_pw=get_U_from_x(pwsvar[-0.5000000000000001][-0.5000000000000001])\n\n\n\n(U_pw['FW']+U_pw['SI']-(Ubase['FW']+Ubase['SI']))*1e3\n\n\nU_pw['FW']+U_pw['SI']\n\nUbase['FW']+Ubase['SI']\n\nU_si\n\n[(kk,U_si[kk]-Ubase[kk]) for kk in Ubase]\n\n[U_si[kk]-Ubase[kk] for kk in Ubase][-1]*cp*rhow/1e6\n\nU_pw['Q']*cp*rhow/1e6\n\ndef lineplot_PW_salinity():\n f,axx=subplots(1,3,figsize=(11,3),sharey=True)\n xind=-1\n yind=-1\n svr=len(PWS_Svec)\n xvar=[(S['PWN']+PWN_Smat)[xind,:],(S['PWS']+PWS_Smat)[:,yind],[(S['PWS']+PWS_Smat)[ii,ii] for ii in range(svr)]]\n ufw_tot=-Ubase['SI']-Ubase['FW']\n yvar_fw=[pwmats['FW'].T[xind,:]+pwmats['SI'].T[xind,:]+ufw_tot,pwmats['FW'].T[:,yind]+pwmats['SI'].T[:,yind]+ufw_tot,array([pwmats['FW'].T[ii,ii]+pwmats['SI'].T[ii,ii]+ufw_tot for ii in range(svr)])]\n yvar_Q=[pwmats['Q'].T[xind,:]-Ubase['Q'],pwmats['Q'].T[:,yind]-Ubase['Q'],array([pwmats['Q'].T[ii,ii]-Ubase['Q'] for ii in range(svr)])]\n xlab=['PWN salinity','PWS salinity','PWS salinity']\n titvec=['a) Vary PWN salinity\\n\\nPWS = 34.4','b) Vary PWS salinity\\n\\nPWN = 33.7','c) Vary both PW salinities']\n lw=2\n for kk in ['AWS','PWS','DWS','AWN','PWN']:\n axx[0].plot(xvar[0],(pwmats[kk].T[xind,:]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)\n axx[1].plot(xvar[1],(pwmats[kk].T[:,yind]-Ubase[kk]),color=coldic[kk],label=kk,linewidth=lw)\n axx[2].plot(xvar[2],array([(pwmats[kk].T[ii,ii]-Ubase[kk])for ii in range(svr)]),color=coldic[kk],label=kk,linewidth=lw)\n for ii in range(3):\n ax1=axx[ii].twinx()\n for ll in ['']:\n ax1.plot(xvar[ii],(yvar_fw[ii])*1e3,color='c',linewidth=lw)\n ax2=axx[ii].twinx()\n ax2.plot(xvar[ii],cp*rhow*(yvar_Q[ii])/1e6,color='limegreen',linewidth=lw)\n axx[ii].set_xlabel(xlab[ii])\n ax1.set_ylim(-10,10)\n 
ax2.set_ylim(-40,40)\n axx[ii].set_title(titvec[ii],fontweight='bold')\n if ii!=2:\n ax1.set_yticklabels('')\n ax2.set_yticklabels('')\n axx[ii].set_xlim(xvar[ii][0],xvar[ii][-1])\n axx[0].set_ylim(-1.5,1.5)\n axx[0].set_yticks(arange(-1,1.1,0.5))\n ax2.spines[\"right\"].set_position((\"axes\", 1.3))\n axx[0].set_ylabel('Transport anomaly [Sv]')\n ax1.set_ylabel('Fresh water flux anomaly [mSv]',color='c')\n ax2.set_ylabel('Heat flux anomaly [TW]',color='limegreen')\n ax1.tick_params(axis='y', colors='c')\n ax2.tick_params(axis='y', colors='limegreen')\n leg=axx[0].legend(loc=(0.5,-0.5),ncol=5,fontsize=13)\n for line in leg.get_lines():\n line.set_linewidth(4.0)\n axi2=axx[2].twiny()\n axi2.set_xticks(arange(32.8,33.8,0.2))\n axi2.set_xlim(xvar[0][0],xvar[0][-1])\n axi2.set_xlabel('PWN salinity')\n axx[2].axvline(34.4-0.5,color='k',zorder=0)\n # axx[0].set_title('a) Vary PWN salinities\\n\\n',fontweight='bold')\n # axx[1].set_title('b) Vary PWS salinities\\n\\n',fontweight='bold')\n # axx[2].set_title('c) Vary both PW salinities',fontweight='bold')\n savefig(figdir_paper+'/PWS_dep.png',bbox_inches='tight')\n savefig(figdir_paper+'/PWS_dep.pdf',bbox_inches='tight')\n\nlineplot_PW_salinity()\n37/(56+37+5)\n\n\n#######################################################################################\n############## What happens if we add more FW? (Like 100mSv) ###########################\n#######################################################################################\nUbase['FW']\nUbase['SI']\n\n\nfwvar={}\nfor U_FW in arange(0,0.11,0.01):\n AM=array([[1,1,1,1,1,1,1,0],\\\n [S['PWS'],S['AWS'],S['DWS'],S['PWN'],S['AWN'],S['FW'],S['SI'],0],\\\n [T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])\n\n xinit=xbase.copy()\n xinit[5]=xinit[5]+U_FW\n dv=-AM.dot(xinit)\n\n Evec=xinit/5\n Evec[5:7]=1e-10\n E=diag(Evec)\n Winv=diag([1,1/Snorm,1/Tnorm])\n Umat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))\n\n Lambda_inv = zeros((AM.shape[0], AM.shape[1])).T\n Lambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)\n xsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\n xsol_Ad=E.dot(xsol_prime)\n fwvar[U_FW]=xinit+xsol_Ad\n\nU_fwvar=get_U_from_x(fwvar[0.02])\na_fw,b_fw=get_a_b_fracs(U_fwvar,S)\nU['FW']+U['SI']\nUbase['FW']+Ubase['SI']+0.05\nU_fwvar['FW']+U_fwvar['SI']\n\nU_fwvar['Q']*cp*rhow/1e6\nU_fwvar\n\n#######################################################################################\n############## What happens if we add more FW and make PWS fresher? 
###########################\n#######################################################################################\n\nAM=array([[1,1,1,1,1,1,1,0],\\\n[S['PWS']-0.5,S['AWS'],S['DWS'],S['PWN']-0.5,S['AWN'],S['FW'],S['SI'],0],\\\n[T['PWS'],T['AWS'],T['DWS'],T['PWN'],T['AWN'],T['FW'],T['SI'],1]])\n\nxinit=xbase.copy()\nxinit[5]=xinit[5]+0.02\ndv=-AM.dot(xinit)\n\nEvec=xinit/5\nEvec[5:7]=1e-10\nE=diag(Evec)\nWinv=diag([1,1/Snorm,1/Tnorm])\nUmat,D,VmatT=linalg.svd(Winv.dot(AM.dot(E)))\n\nLambda_inv = zeros((AM.shape[0], AM.shape[1])).T\nLambda_inv[:AM.shape[0], :AM.shape[0]] = diag(1/D)\nxsol_prime=VmatT.T.dot(Lambda_inv.dot(Umat.T.dot(Winv.dot(dv))))\nxsol_Ad=E.dot(xsol_prime)\nx_both=xinit+xsol_Ad\nU_both=get_U_from_x(x_both)\n\nS_PW=S.copy()\nS_PW['PWS']=S['PWS']-0.5\nS_PW['PWN']=S['PWN']-0.5\na_both,b_both=get_a_b_fracs(U_both,S_PW)\n#######################################################################################\n############## Now look at consequences for FW dist ###########################\n#######################################################################################\na_pwmat=zeros((len(epsilon),shape(pwmats['Q'])[1],shape(pwmats['Q'])[0]))\nb_pwmat=a_pwmat.copy()\nfor ii,ee in enumerate(1-epsilon):\n a_pwmat[ii,:,:]=(ee*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['PWS'].T*((S['PWS']+PWS_Smat)/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)\n b_pwmat[ii,:,:]=((1-ee)*pwmats['PWN'].T*((S['PWN']+PWN_Smat)/S['AWS']-1)+pwmats['DWS'].T*(S['DWS']/S['AWS']-1))/(pwmats['FW'].T+pwmats['SI'].T)\nc_pwmat=1-a_pwmat-b_pwmat\n\nPWN_Smat[10,10]\nPWS_Smat[10,10]\n\nPWN_Smat[5,5]\nPWS_Smat[5,5]\n\nepsilon=arange(0,1.1,0.1)\n\nfwcol='#43a2ca'\nash='d'\n\ndef plot_adep_pw():\n f,axx=subplots(1,2,figsize=(11,3.2),sharex=True)\n f.subplots_adjust(wspace=0.3)\n for ii,var in enumerate([a_pwmat,b_pwmat]):\n if ii==0:\n xvar=(1-epsilon)\n xvar2=1\n xvar3=0\n else:\n xvar=epsilon\n xvar2=0\n xvar3=1\n axx[ii].plot(xvar*Ubase['PWN'],var[:,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,linewidth=3,color='k',label='Base case',zorder=5)\n axx[ii].plot(xvar*U_pw['PWN'],var[:,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,color='purple',zorder=4,label='Polar Waters fresher by 0.5',linewidth=3)\n axx[ii].plot(xvar2*Ubase['PWN'],var[0,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,'o',color='k',label='',zorder=5)\n axx[ii].plot(xvar2*U_pw['PWN'],var[0,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,'o',color='purple',zorder=4,label='')\n axx[ii].plot(xvar3*Ubase['PWN'],var[-1,10,10]*(Ubase['FW']+Ubase['SI'])*1e3,ash,color='k',label='',zorder=5)\n axx[ii].plot(xvar3*U_pw['PWN'],var[-1,5,5]*(U_pw['FW']+U_pw['SI'])*1e3,ash,color='purple',zorder=4,label='')\n axx[ii].set_ylim(-30,140)\n axx[0].plot((1-epsilon)*U_fwvar['PWN'],a_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol,label='Add 20 mSv of Fresh Water')\n axx[1].plot(epsilon*U_fwvar['PWN'],b_fw*(U_fwvar['FW']+U_fwvar['SI'])*1e3,linewidth=3,color=fwcol)\n axx[0].plot(U_fwvar['PWN'],a_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')\n axx[1].plot(0,b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'o',color=fwcol,label='')\n axx[0].plot(0,a_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')\n axx[1].plot(U_fwvar['PWN'],b_fw[-1]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,ash,color=fwcol,label='')\n axx[0].plot(0.5,56,'*',color='k',label='',markersize=10)\n axx[0].plot(1.1,56,'*',color='purple',label='',markersize=10)\n axx[1].plot(1.3,37,'*',color='k',label='',markersize=10)\n axx[1].plot(1,37,'*',color='purple',label='',markersize=10)\n # 
axx[1].plot(U_fwvar['PWN'],b_fw[0]*(U_fwvar['FW']+U_fwvar['SI'])*1e3,'s',color='k',label='')\n\n # axx[0].plot(1-epsilon,a_both,linewidth=3,color='g',label='Both')\n # axx[1].plot(1-epsilon,b_both,linewidth=3,color='g')\n axx[0].legend(loc=(0.05,-0.5),ncol=3,fontsize=12)\n axx[0].set_title('a) Estuarine limb',fontsize=14)\n axx[1].set_title('b) Overturning limb',fontsize=14)\n axx[0].set_ylabel('$\\mathbf{\\delta}\\ U_{FW}$\\nFW transport in $\\mathbf{PWS}$ [mSv]')\n axx[1].set_ylabel('$\\mathbf{\\gamma}\\ U_{FW}$\\nFW transport in $\\mathbf{DWS}$ [mSv]')\n axx[0].set_xlabel('$\\mathbf{(1-\\epsilon)} \\ U_{PWN}$\\nPWN transport in $\\mathbf{PWS}$ [Sv]')\n axx[1].set_xlabel('$\\mathbf{\\epsilon} \\ U_{PWN}$\\nPWN transport in $\\mathbf{DWS}$ [Sv]')\n for axi in axx[0],axx[1]:\n axi.axhline(0,color='k')\n axi.set_xlim(-0.05,2.2)\n axx[0].axhline(56,color='k',linestyle='--')\n axx[1].axhline(37,color='k',linestyle='--')\n savefig(figdir_paper+'/FWfrac_obs_pwdep.png',bbox_inches='tight')\n savefig(figdir_paper+'/FWfrac_obs_pwdep.pdf',bbox_inches='tight')\n\nplot_adep_pw()\n\n\ndef get_PWN_from_FW(x2,y1,y2,y3):\n x3=(y3-y1)*x2/(y2-y1)\n return x3\n\nx3_base_PWS=get_PWN_from_FW(Ubase['PWN'],(Ubase['FW']+Ubase['SI'])*a_pwmat[-1,10,10]*1e3,(Ubase['FW']+Ubase['SI'])*a_pwmat[0,10,10]*1e3,50)\n\nx3_base_PWS\nUbase['PWN']\n1-x3_base_PWS/Ubase['PWN']\n\n\nx3_fresh_PWS=get_PWN_from_FW(U_pw['PWN'],(U_pw['FW']+U_pw['SI'])*a_pwmat[-1,5,5]*1e3,(U_pw['FW']+U_pw['SI'])*a_pwmat[0,5,5]*1e3,50)\n\nx3_fresh_PWS\nU_pw['PWN']\n\n\ndef get_AWS_from_PWN(Uvar,Svar,eps):\n alpha_U=-(Uvar['PWS']*Svar['PWS']+(1-eps)*Uvar['PWN']*Svar['PWN'])/Svar['AWS']\n beta_U=-(Uvar['DWS']*Svar['DWS']+eps*Uvar['PWN']*Svar['PWN'])/Svar['AWS']\n return alpha_U,beta_U\n\nget_AWS_from_PWN(Ubase,S,0.65)\nget_AWS_from_PWN(U_pw,S_PW,0.65)\n\n############################graveyard\n\n# def plot_in_each(axi):\n# axi.plot(S['PWN'],S['PWS'],'ko',markersize=10)\n# axi.plot(S['PWN']+PWN_Svec,S['PWN']+PWN_Svec,'r-',linewidth=3)\n#\n# def plot_PW_Sdep(Svec,Tvec,simats):\n# f,axx=subplots(2,4,figsize=(15,6),sharex=True,sharey=True)\n# axivec=array([])\n# for axirow in axx:\n# for axi in axirow:\n# axivec=hstack((axivec,axi))\n# for axi,kk in zip(axivec,simats):\n# if (kk=='FW') | (kk=='SI'):\n# climi=20\n# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk])*1e3,vmin=-climi,vmax=climi,cmap=cm.RdBu)\n# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')\n# axi.set_title(kk+' [mSv]')\n# cbar=colorbar(contit,ax=axi,format='%1.0f')\n# plot_in_each(axi)\n# elif kk=='Q':\n# climi=30\n# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,cp*rhow*(pwmats['Q'].T-Ubase['Q'])/1e6,vmin=-climi,vmax=climi,cmap=cm.PiYG_r)\n# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')\n# axi.set_title(kk+' [TW]')\n# cbar=colorbar(contit,ax=axi,format='%2.0f')\n# plot_in_each(axi)\n# else:\n# climi=1.5\n# contit=axi.contourf(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),vmin=-climi,vmax=climi,cmap=cm.PuOr_r)\n# axi.contour(S['PWN']+PWN_Svec,S['PWS']+PWS_Svec,(pwmats[kk].T-Ubase[kk]),levels=[0],colors='k')\n# axi.set_title(kk+' [Sv]')\n# cbar=colorbar(contit,ax=axi,format='%0.2f')\n# plot_in_each(axi)\n# for label in cbar.ax.yaxis.get_ticklabels()[1::2]:\n# label.set_visible(False)\n# axi.set_ylim(S['PWS']+PWS_Svec[0],S['PWS']+PWS_Svec[-1])\n# f.text(0.5, 0, 'PWN salinity', ha='center',fontsize=14)\n# f.text(0.05, 0.5, 'PWS salinity', 
va='center',rotation='vertical',fontsize=14)\n#\n# savefig(figdir_paper+'_extra_2004/PW_Sdep.png',bbox_inches='tight')\n# savefig(figdir_paper+'_extra_2004/PW_Sdep.pdf',bbox_inches='tight')\n#\n#\n# plot_PW_Sdep(PWN_Svec,PWS_Svec,pwmats)\n\n\n# def plot_PW_Sdep_lines():\n# f,axx=subplots(2,4,figsize=(15,6),sharex=True)\n# axivec=array([])\n# for axirow in axx:\n# for axi in axirow:\n# axivec=hstack((axivec,axi))\n# for axi,kk in zip(axivec,simats):\n# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[-2,:],(pwmats[kk].T[-2,:]),label='vary PWN salinity')\n# axi.plot(((S['PWN']+PWN_Smat)-(S['PWS']+PWS_Smat))[:,-3],(pwmats[kk].T[:,-3]),label='vary PWS salinity')\n# axi.plot(((S['PWN'])-(S['PWS'])),(Ubase[kk]),'ko',label='base case')\n# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[5,5]),'ro',label='both 0.5 fresher')\n# axi.plot(((S['PWN'])-(S['PWS'])),(pwmats[kk].T[0,0]),'go',label='both 1 fresher')\n# axi.set_title(kk)\n# axi.legend(loc=(1,0.7))\n# f.text(0.5, 0, 'PWN salinity - PWS salinity', ha='center',fontsize=14)\n# # f.text(0.05, 0.5, 'PWS salinity', va='center',rotation='vertical',fontsize=14)\n#\n# # savefig(figdir_paper+'/PW_Sdep.png',bbox_inches='tight')\n# # savefig(figdir_paper+'/PW_Sdep.pdf',bbox_inches='tight')\n#\n# plot_PW_Sdep_lines()\n# Ubase.keys()\n",
"step-ids": [
10,
11,
13,
14,
16
]
}
|
[
10,
11,
13,
14,
16
] |
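For reference, the tapered least-squares solve repeated throughout the record above (row weighting with Winv, column scaling with E, SVD pseudoinverse, mapping back with E) distills to a single helper. This is a sketch of that pattern only; the function name and the numpy spelling are mine, not from the record:

import numpy as np

def weighted_scaled_lstsq(A, d, row_weights, col_scales):
    # Solve A x ~ d: weight the rows, scale the columns, invert via the
    # SVD pseudoinverse, then map the scaled solution back with E.
    Winv = np.diag(row_weights)
    E = np.diag(col_scales)
    U, D, Vt = np.linalg.svd(Winv @ A @ E)
    Lam_inv = np.zeros((A.shape[1], A.shape[0]))
    Lam_inv[:A.shape[0], :A.shape[0]] = np.diag(1.0 / D)
    x_prime = Vt.T @ Lam_inv @ U.T @ Winv @ d
    return E @ x_prime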
import ply.lex as lex
print("hello word!")
|
normal
|
{
"blob_id": "84d0c439fcee4339250ced11dd2264740cc20d9c",
"index": 9567,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('hello word!')\n",
"step-3": "import ply.lex as lex\nprint('hello word!')\n",
"step-4": "import ply.lex as lex\n\nprint(\"hello word!\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
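The record above imports ply.lex without exercising it; for context, a minimal working lexer built on that import could look like the sketch below (the token names and rules are illustrative, not taken from the record):

import ply.lex as lex

tokens = ('NUMBER', 'PLUS')  # ply requires a module-level tokens tuple

t_PLUS = r'\+'
t_ignore = ' \t'

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    print('Illegal character %r' % t.value[0])
    t.lexer.skip(1)

lexer = lex.lex()        # builds the lexer from this module's t_* rules
lexer.input('1 + 22')
for tok in iter(lexer.token, None):
    print(tok.type, tok.value)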
#
# Copyright John Reid 2009
#
"""
Code to handle bootstrap analyses.
"""
from itertools import cycle
import random
import bisect
def generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):
"""
Yield samples that match the sizes given in test_set_sizes
"""
for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):
yield random.sample(test_universe, sample_size)
def calculate_bootstrap_statistics(samples, statistic):
"Calculate the bootstrap statistics for the samples."
stats = list(map(statistic, samples))
stats.sort()
return stats
def bootstrap_p_value(bootstrap_stats, stat_value):
"""
Calculate the p-value for the statistic's value given the bootstrap values.
"""
return 1. - bisect.bisect_left(bootstrap_stats, stat_value) / float(len(bootstrap_stats))
|
normal
|
{
"blob_id": "752affdfa1481b9a19a9b7dfe76f9d5d11c80073",
"index": 4678,
"step-1": "<mask token>\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n",
"step-2": "<mask token>\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\n<mask token>\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n",
"step-3": "<mask token>\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"\"\"Calculate the bootstrap statistics for the samples.\"\"\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n",
"step-4": "<mask token>\nfrom itertools import cycle\nimport random\nimport bisect\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(\n test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"\"\"Calculate the bootstrap statistics for the samples.\"\"\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1.0 - bisect.bisect_left(bootstrap_stats, stat_value) / float(len\n (bootstrap_stats))\n",
"step-5": "#\n# Copyright John Reid 2009\n#\n\n\n\"\"\"\nCode to handle bootstrap analyses.\n\"\"\"\n\nfrom itertools import cycle\nimport random\nimport bisect\n\n\ndef generate_bootstrap_samples(num_samples, test_universe, test_set_sizes):\n \"\"\"\n Yield samples that match the sizes given in test_set_sizes\n \"\"\"\n for sample_idx, sample_size in zip(range(num_samples), cycle(test_set_sizes)):\n yield random.sample(test_universe, sample_size)\n\n\ndef calculate_bootstrap_statistics(samples, statistic):\n \"Calculate the bootstrap statistics for the samples.\"\n stats = list(map(statistic, samples))\n stats.sort()\n return stats\n\n\ndef bootstrap_p_value(bootstrap_stats, stat_value):\n \"\"\"\n Calculate the p-value for the statistic's value given the bootstrap values.\n \"\"\"\n return 1. - bisect.bisect_left(bootstrap_stats, stat_value) / float(len(bootstrap_stats))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
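A usage sketch for the three bootstrap helpers defined in the record above; the universe, sample count, set sizes, and test value are invented for illustration:

import random
from statistics import mean

random.seed(0)
universe = [random.gauss(0.0, 1.0) for _ in range(200)]  # hypothetical test universe
samples = generate_bootstrap_samples(1000, universe, test_set_sizes=[25])
boot_stats = calculate_bootstrap_statistics(samples, mean)
# Fraction of bootstrap means at or above 0.3; a small value marks 0.3 as extreme.
print(bootstrap_p_value(boot_stats, stat_value=0.3))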
from point import Point
from velocity import Velocity
import arcade
import config
PADDLE_WIDTH = 15
PADDLE_HEIGHT = 30
class Paddle:
def __init__(self):
self.center = Point(390, 50)
self.velocity = Velocity(0, 5)
def draw(self):
self.drawing = arcade.draw_rectangle_filled(self.center.x, self.center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.color.ELECTRIC_LIME)
def move_up(self):
if self.center.y < config.SCREEN_HEIGHT - (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y + self.velocity.dy
def move_down(self):
if self.center.y > 0 + (config.PADDLE_HEIGHT / 2):
self.center.y = self.center.y - self.velocity.dy
|
normal
|
{
"blob_id": "cb3c1adb9d91aecee5b21774d61dfe9400a330fa",
"index": 619,
"step-1": "<mask token>\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n <mask token>\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-2": "<mask token>\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-3": "<mask token>\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-4": "from point import Point\nfrom velocity import Velocity\nimport arcade\nimport config\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.\n center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.\n color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + config.PADDLE_HEIGHT / 2:\n self.center.y = self.center.y - self.velocity.dy\n",
"step-5": "from point import Point\nfrom velocity import Velocity\nimport arcade\nimport config\n\nPADDLE_WIDTH = 15\nPADDLE_HEIGHT = 30\n\nclass Paddle:\n\n def __init__(self):\n self.center = Point(390, 50)\n self.velocity = Velocity(0, 5)\n\n def draw(self):\n self.drawing = arcade.draw_rectangle_filled(self.center.x, self.center.y, config.PADDLE_WIDTH, config.PADDLE_HEIGHT, arcade.color.ELECTRIC_LIME)\n\n def move_up(self):\n if self.center.y < config.SCREEN_HEIGHT - (config.PADDLE_HEIGHT / 2):\n self.center.y = self.center.y + self.velocity.dy\n\n def move_down(self):\n if self.center.y > 0 + (config.PADDLE_HEIGHT / 2):\n self.center.y = self.center.y - self.velocity.dy\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
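A minimal harness sketch for the Paddle class above, assuming arcade >= 2.6 and that the record's config module also defines SCREEN_WIDTH; it moves one step per key press (continuous movement would need key-state tracking in on_update):

import arcade
import config  # assumed to define SCREEN_WIDTH, SCREEN_HEIGHT and the PADDLE_* constants

class PaddleWindow(arcade.Window):
    def __init__(self):
        super().__init__(config.SCREEN_WIDTH, config.SCREEN_HEIGHT, 'Paddle demo')
        self.paddle = Paddle()  # the class from the record above

    def on_draw(self):
        self.clear()
        self.paddle.draw()

    def on_key_press(self, key, modifiers):
        if key == arcade.key.UP:
            self.paddle.move_up()
        elif key == arcade.key.DOWN:
            self.paddle.move_down()

PaddleWindow()
arcade.run()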
# Generated by Django 3.0.1 on 2020-03-20 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('page', '0004_auto_20200320_1521'),
]
operations = [
migrations.AddField(
model_name='menu',
name='level',
field=models.PositiveIntegerField(default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='menu',
name='lft',
field=models.PositiveIntegerField(default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='menu',
name='rght',
field=models.PositiveIntegerField(default=0, editable=False),
preserve_default=False,
),
migrations.AddField(
model_name='menu',
name='tree_id',
field=models.PositiveIntegerField(db_index=True, default=1, editable=False),
preserve_default=False,
),
migrations.DeleteModel(
name='Menu1',
),
]
|
normal
|
{
"blob_id": "807b20f4912ab89bf73966961536a4cd4367f851",
"index": 6468,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('page', '0004_auto_20200320_1521')]\n operations = [migrations.AddField(model_name='menu', name='level',\n field=models.PositiveIntegerField(default=0, editable=False),\n preserve_default=False), migrations.AddField(model_name='menu',\n name='lft', field=models.PositiveIntegerField(default=0, editable=\n False), preserve_default=False), migrations.AddField(model_name=\n 'menu', name='rght', field=models.PositiveIntegerField(default=0,\n editable=False), preserve_default=False), migrations.AddField(\n model_name='menu', name='tree_id', field=models.\n PositiveIntegerField(db_index=True, default=1, editable=False),\n preserve_default=False), migrations.DeleteModel(name='Menu1')]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('page', '0004_auto_20200320_1521')]\n operations = [migrations.AddField(model_name='menu', name='level',\n field=models.PositiveIntegerField(default=0, editable=False),\n preserve_default=False), migrations.AddField(model_name='menu',\n name='lft', field=models.PositiveIntegerField(default=0, editable=\n False), preserve_default=False), migrations.AddField(model_name=\n 'menu', name='rght', field=models.PositiveIntegerField(default=0,\n editable=False), preserve_default=False), migrations.AddField(\n model_name='menu', name='tree_id', field=models.\n PositiveIntegerField(db_index=True, default=1, editable=False),\n preserve_default=False), migrations.DeleteModel(name='Menu1')]\n",
"step-5": "# Generated by Django 3.0.1 on 2020-03-20 09:59\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('page', '0004_auto_20200320_1521'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='menu',\n name='level',\n field=models.PositiveIntegerField(default=0, editable=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='menu',\n name='lft',\n field=models.PositiveIntegerField(default=0, editable=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='menu',\n name='rght',\n field=models.PositiveIntegerField(default=0, editable=False),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='menu',\n name='tree_id',\n field=models.PositiveIntegerField(db_index=True, default=1, editable=False),\n preserve_default=False,\n ),\n migrations.DeleteModel(\n name='Menu1',\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
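The four columns this migration adds (level, lft, rght, tree_id) are exactly the nested-set bookkeeping fields that django-mptt attaches to an MPTTModel, so the underlying model change was presumably converting Menu to MPTT. A sketch of such a model; the title and parent field names are illustrative:

from django.db import models
from mptt.models import MPTTModel, TreeForeignKey

class Menu(MPTTModel):
    # lft, rght, tree_id and level are created automatically by MPTTModel.
    title = models.CharField(max_length=100)
    parent = TreeForeignKey('self', null=True, blank=True,
                            on_delete=models.CASCADE,
                            related_name='children')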
# Generated by Django 2.0.4 on 2018-04-30 14:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('base', '0007_topfilter'),
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),
('title', models.CharField(max_length=50)),
('text', models.TextField()),
('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),
],
bases=('base.base',),
),
]
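# The base_ptr OneToOneField with parent_link=True is what Django generates
# for multi-table inheritance. A model sketch that would produce the migration
# above (field names come straight from the operations; the base.models import
# path for the parent model is an assumption):

from django.conf import settings
from django.db import models
from base.models import Base  # assumed location of the parent model


class Post(Base):
    title = models.CharField(max_length=50)
    text = models.TextField()
    post_related_user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
        related_name='post_related_user_name',
    )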
|
normal
|
{
"blob_id": "d13589979ba7b6facd8339111323270c9920a9bf",
"index": 8127,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('base', '0007_topfilter')]\n operations = [migrations.CreateModel(name='Post', fields=[('base_ptr',\n models.OneToOneField(auto_created=True, on_delete=django.db.models.\n deletion.CASCADE, parent_link=True, primary_key=True, serialize=\n False, to='base.Base')), ('title', models.CharField(max_length=50)),\n ('text', models.TextField()), ('post_related_user', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE,\n related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)\n )], bases=('base.base',))]\n",
"step-5": "# Generated by Django 2.0.4 on 2018-04-30 14:01\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('base', '0007_topfilter'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('base_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='base.Base')),\n ('title', models.CharField(max_length=50)),\n ('text', models.TextField()),\n ('post_related_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_related_user_name', to=settings.AUTH_USER_MODEL)),\n ],\n bases=('base.base',),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
<|reserved_special_token_1|>
import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
hexa = title[:2]
hexb = title[2:4]
title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))
os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +
ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename = os.path.basename(pathname)
new_filename = re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(pathname, os.path.join(os.path.dirname(pathname),
new_filename))
rename('C:\\test', '*.jpeg', '%s')
<|reserved_special_token_1|>
import re, glob, os
lst = []
def rename(dir, pattern, titlePattern):
for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):
title, ext = os.path.splitext(os.path.basename(pathAndFilename))
#title = title[22:]
#hexa = []
#hexb = []
hexa = title[:2]
hexb = title[2:4]
#title = title[4:]
title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))
#print(title)
#lst.append(title)
os.rename(pathAndFilename,
os.path.join(dir, titlePattern % title + ext))
def renamer(files, pattern, replacement):
for pathname in glob.glob(files):
basename= os.path.basename(pathname)
new_filename= re.sub(pattern, replacement, basename)
if new_filename != basename:
os.rename(
pathname,
os.path.join(os.path.dirname(pathname), new_filename))
rename(r'C:\test', r'*.jpeg', r'%s')
#print(lst)
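# Worked example of the hex-prefix scheme above: a file named '0a0bscan.jpeg'
# has hexa='0a' (10) and hexb='0b' (11), so rename() moves it to
# 'scan_10_11.jpeg'. A side-effect-free preview of the same string logic:

def preview_rename(filename):
    title, ext = os.path.splitext(filename)
    hexa, hexb = title[:2], title[2:4]
    return title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16)) + ext

#print(preview_rename('0a0bscan.jpeg'))  # -> scan_10_11.jpeg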
|
flexible
|
{
"blob_id": "22aa6042b77c3cfd1f102a0ea22a43223e366d2f",
"index": 1476,
"step-1": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-3": "<mask token>\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-4": "import re, glob, os\nlst = []\n\n\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n hexa = title[:2]\n hexb = title[2:4]\n title = title[4:] + '_' + str(int(hexa, 16)) + '_' + str(int(hexb, 16))\n os.rename(pathAndFilename, os.path.join(dir, titlePattern % title +\n ext))\n\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename = os.path.basename(pathname)\n new_filename = re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(pathname, os.path.join(os.path.dirname(pathname),\n new_filename))\n\n\nrename('C:\\\\test', '*.jpeg', '%s')\n",
"step-5": "import re, glob, os\nlst = []\ndef rename(dir, pattern, titlePattern):\n for pathAndFilename in glob.iglob(os.path.join(dir, pattern)):\n title, ext = os.path.splitext(os.path.basename(pathAndFilename))\n #title = title[22:]\n #hexa = []\n #hexb = []\n hexa = title[:2]\n hexb = title[2:4]\n #title = title[4:]\n\n title = (title[4:] + '_' + str(int(hexa,16)) + '_' + str(int(hexb, 16)))\n \n #print(title)\n #lst.append(title)\n os.rename(pathAndFilename, \n os.path.join(dir, titlePattern % title + ext))\n\ndef renamer(files, pattern, replacement):\n for pathname in glob.glob(files):\n basename= os.path.basename(pathname)\n new_filename= re.sub(pattern, replacement, basename)\n if new_filename != basename:\n os.rename(\n pathname,\n os.path.join(os.path.dirname(pathname), new_filename))\n\n\nrename(r'C:\\test', r'*.jpeg', r'%s')\n#print(lst)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class section:
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]
self.subParams['subSize'] = subImg.shape
        self.subParams['bigSize'] = [int(Params['size'] / Params[
            'numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams[
'bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams[
'subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.
subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[
'bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'],
Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.
subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,
:, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],
self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([
Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0)
S = np.array(FI0, dtype=np.complex64)
return S
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0
] / self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][
1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]
), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1
] = LEDPixelPos
if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2
) ** 2 < Rad:
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs /
2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.
Params['distance'])
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.
Params['distance'])
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.
subParams['subSize'][0] / 2)]
return pos
class splitImage:
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.
LEDPos, i, j)
class subImage:
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(
j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class section:
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]
self.subParams['subSize'] = subImg.shape
        self.subParams['bigSize'] = [int(Params['size'] / Params[
            'numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams[
'bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams[
'subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.
subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[
'bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'],
Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.
subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,
:, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],
self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([
Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0)
S = np.array(FI0, dtype=np.complex64)
return S
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0
] / self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][
1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]
), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1
] = LEDPixelPos
if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2
) ** 2 < Rad:
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs /
2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.
Params['distance'])
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.
Params['distance'])
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.
subParams['subSize'][0] / 2)]
return pos
class splitImage:
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.
LEDPos, i, j)
class subImage:
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(
j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class fullSys:
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def getDivisor(self, img, splitSize):
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
divisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / divisor)
return numFiles, divisor
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class section:
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]
self.subParams['subSize'] = subImg.shape
        self.subParams['bigSize'] = [int(Params['size'] / Params[
            'numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams[
'bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams[
'subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.
subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[
'bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'],
Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.
subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,
:, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],
self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([
Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0)
S = np.array(FI0, dtype=np.complex64)
return S
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0
] / self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][
1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]
), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1
] = LEDPixelPos
if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2
) ** 2 < Rad:
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs /
2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.
Params['distance'])
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.
Params['distance'])
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.
subParams['subSize'][0] / 2)]
return pos
class splitImage:
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.
LEDPos, i, j)
class subImage:
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(
j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class fullSys:
def __init__(self, dir, file, size, line):
csv_reader = pandas.read_csv(file, index_col='Objective')
self.Params = {}
self.Params['mag'] = csv_reader['Magnification'][line]
self.Params['NA'] = csv_reader['NA'][line]
self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader[
'Pixel Size y'][line]]
self.Params['distance'] = csv_reader['Screen Distance'][line]
self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]
self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader[
'Num LED x'][line]]
self.Params['dir'] = dir
self.Params['images'] = os.listdir(dir)
self.Params['numImgs'] = len(self.Params['images'])
self.Params['smallSize'] = meth.readImage(dir, self.Params['images'
][0], colour=1, getsize=True)
self.Params['fResolution'] = self.fRes(self.Params['mag'], self.
Params['smallSize'], self.Params['ps'])
print('fullSys')
splitSize, self.Params['lc'] = self.getSS()
img = meth.readImage(self.Params['dir'], self.Params['images'][0])
print('fullSys2')
numFiles, divisor = self.getDivisor(img, splitSize)
print('fullSys2')
self.Params['numFiles'] = numFiles
self.Params['divisor'] = divisor
self.Params['size'] = self.getSize(size, numFiles)
self.subObjs = np.empty([numFiles, numFiles], dtype=section)
print('fullSys1')
for i in range(numFiles):
for j in range(numFiles):
subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j +
1) * divisor]
self.subObjs[i, j] = section(i, j, subImg, self.Params)
h.progbar(i, numFiles, 'Initializing')
    def getSS(self):
        """ Determines the required subsection size based on the van Cittert-Zernike theorem """
        rho = 0.0003
        lc = 0.61 * self.Params['distance'] * 5.3e-07 / rho
        size = lc * self.Params['mag'] / self.Params['ps'][0]
        return size, lc
def getDivisor(self, img, splitSize):
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
divisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / divisor)
return numFiles, divisor
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class section:
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]
self.subParams['subSize'] = subImg.shape
        self.subParams['bigSize'] = [int(Params['size'] / Params[
            'numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams[
'bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams[
'subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.
subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[
'bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'],
Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.
subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,
:, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],
self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([
Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0)
S = np.array(FI0, dtype=np.complex64)
return S
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0
] / self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][
1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]
), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1
] = LEDPixelPos
if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2
) ** 2 < Rad:
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs /
2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.
Params['distance'])
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.
Params['distance'])
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.
subParams['subSize'][0] / 2)]
return pos
class splitImage:
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.
LEDPos, i, j)
class subImage:
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(
j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import numpy as np
import cv2 as cv
import methods as meth
from numpy.fft import fft2, fftshift, ifft2, ifftshift
import pandas
import os
import noGPU as h
import matplotlib.pyplot as plt
class fullSys():
def __init__(self, dir, file, size, line):
csv_reader = pandas.read_csv(file, index_col='Objective')
self.Params = {}
self.Params['mag'] = csv_reader['Magnification'][line]
self.Params['NA'] = csv_reader['NA'][line]
self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]
self.Params['distance'] = csv_reader['Screen Distance'][line]
self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]
self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]
self.Params['dir'] = dir
self.Params['images'] = os.listdir(dir)
self.Params['numImgs'] = len(self.Params['images'])
self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)
self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])
print("fullSys")
## Instantiate sub Objects ##
splitSize, self.Params['lc'] = self.getSS()
img = meth.readImage(self.Params['dir'], self.Params['images'][0])
print("fullSys2")
numFiles, divisor = self.getDivisor(img, splitSize)
print("fullSys2")
self.Params['numFiles'] = numFiles
self.Params['divisor'] = divisor
self.Params['size'] = self.getSize(size, numFiles)
self.subObjs = np.empty([numFiles, numFiles], dtype=section)
print("fullSys1")
for i in range(numFiles):
for j in range(numFiles):
subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]
self.subObjs[i, j] = section(i, j, subImg, self.Params)
h.progbar(i, numFiles, 'Initializing')
    def getSS(self):
        """ Determines the required subsection size based on the van Cittert-Zernike theorem """
        rho = 300e-6  # LED source size (m)
        lc = 0.61 * self.Params['distance'] * 530e-9 / rho  # coherence width 0.61*lambda*z/rho
        size = lc * self.Params['mag'] / self.Params['ps'][0]
        return size, lc
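    # Worked example for getSS (all numbers assumed): with distance z = 0.1 m,
    # rho = 300e-6 m and lambda = 530e-9 m, lc = 0.61*0.1*530e-9/300e-6
    # ~= 1.08e-4 m; at mag = 10 and ps = 6.5e-6 m that maps to a subsection of
    # roughly 1.08e-4 * 10 / 6.5e-6 ~= 166 px, consistent with the ~150 px
    # splitSize used elsewhere in this file.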
def getDivisor(self, img, splitSize):
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
divisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / divisor)
return numFiles, divisor
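    # Example (assumed numbers): for a 2048-px image and splitSize = 150, every
    # value 150..255 fails 2048 % splitSize == 0, so divisor = 256 and
    # numFiles = 2048 / 256 = 8 subsections per side.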
def getSize(self, size, numSplits):
while True:
if size[0] % numSplits == 0:
break
size[0] += 1
return size[0]
def fRes(self, mag, size, ps):
""" Determines the change in spatial frequency across one pixel in F-space """
x = 2 * np.pi * mag / (size[0] * ps[0])
y = 2 * np.pi * mag / (size[1] * ps[1])
return [x, y]
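    # Example (assumed numbers): mag = 10, smallSize = 1024 px and
    # ps = 6.5e-6 m give 2*pi*10 / (1024 * 6.5e-6) ~= 9.44e3 rad/m of spatial
    # frequency per pixel along each axis.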
class section():
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]
self.subParams['subSize'] = subImg.shape
        self.subParams['bigSize'] = [int(Params['size'] / Params['numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\
self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]),
interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0) # FI0.shape[0]
S = np.array(FI0, dtype=np.complex64)
return S
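    # Sanity check for initS0: the chain above is inverted by
    # amp = fftshift(ifft2(ifftshift(S))), so np.abs(amp)**2 should reproduce
    # the upsampled intensity I0.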
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
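    # Example continuing the fRes numbers above (all assumed): NA = 0.25 and
    # wLen = 530e-9 m with fDu ~= 9.44e3 rad/m per pixel give a pupil radius of
    # 2*pi*0.25 / (530e-9 * 9.44e3) ~= 314 px in F-space.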
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
#print("LED:", LED, "LEDPixelPos:", LEDPixelPos)
#print("LEDPos:", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = LEDPixelPos
if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]
return pos
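    # Example with the same assumed numbers: an LED at nx = 3 with 5 mm pitch,
    # centre = 0 and distance = 0.1 m gives ax = arctan(-0.015/0.1)
    # ~= -0.149 rad, so dx = -0.149 / (530e-9 * 9.44e3) ~= -30 px from the
    # centre of the sub-image spectrum.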
class splitImage():
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)
class subImage():
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
########################################################################################################################
'''
class preProcess(objective):
def __init__(self, dir, file, size, line, colour=1):
""" Slices images into sections """
super().__init__(dir, file, size, line, colour=1)
numFiles, devisor = self.getDevisor(150)
self.genFiles(numFiles)
self.split(devisor, numFiles)
def genFiles(self, numFiles):
path = os.path.join(os.getcwd(), 'temp')
if os.path.isdir(path):
shutil.rmtree(path)
time.sleep(0.01)
os.mkdir(path)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path, folder)
os.mkdir(path1)
def getDevisor(self, splitSize):
imgName = self.images[0]
img = self.readImage(self.dir, imgName)
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
devisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / devisor)
return numFiles, devisor
def split(self, devisor, numFiles):
path0 = os.path.join(os.getcwd(), 'temp')
for i0, file in enumerate(self.images):
LED = self.getLED(file)
img = self.readImage(self.dir, file)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path0, folder)
file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))
path = os.path.join(path1, file)
subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]
cv.imwrite(path, subImg)
h.progbar(i0 * numFiles ** 2 + i * numFiles + j,
len(self.images) * numFiles ** 2, 'Slicing Images')
def initCoords(self, dir):
""" Returns 2D array where LED coords relate to fourier centre positions """
dirName = os.path.basename(dir)
segmentPos = self.getSegment(dirName)
N = len(os.listdir(dir))
n = np.sqrt(N)
w = self.smallSize[0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag
coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)
for i, img in enumerate(self.images):
LED = self.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)
coords[LED[0], LED[1]] = LEDPixelPos
return coords
'''
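# Usage sketch (directory, CSV path and row label are assumptions):
#
#     system = fullSys('captures/', 'objectives.csv', [2048, 2048], '10x')
#
# where objectives.csv is indexed by its 'Objective' column and supplies the
# Magnification, NA, pixel sizes, screen distance and LED geometry consumed in
# fullSys.__init__.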
|
flexible
|
{
"blob_id": "e3c9487f3221ca89b9014b2e6470ca9d4dbc925a",
"index": 2239,
"step-1": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n <mask token>\n <mask token>\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass 
splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = 
np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass fullSys:\n <mask token>\n <mask token>\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if 
(LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass fullSys:\n\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader[\n 'Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader[\n 'Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'\n ][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.\n Params['smallSize'], self.Params['ps'])\n print('fullSys')\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print('fullSys2')\n numFiles, divisor = self.getDivisor(img, splitSize)\n print('fullSys2')\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print('fullSys1')\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j +\n 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 0.0003\n lc = 0.61 * R * 530 / rho\n size = lc * slef.Params['mag'] / self.Params['ps']\n return size, lc\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], 
Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-5": "import numpy as np\nimport cv2 as cv\nimport methods as meth\nfrom numpy.fft import fft2, fftshift, ifft2, ifftshift\nimport pandas\nimport os\nimport noGPU as h\nimport matplotlib.pyplot as plt\n\nclass fullSys():\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])\n print(\"fullSys\")\n\n ## Instantiate sub Objects ##\n\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print(\"fullSys2\")\n\n numFiles, divisor = self.getDivisor(img, splitSize)\n print(\"fullSys2\")\n\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print(\"fullSys1\")\n\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 300e-6 # LED size\n lc = 0.61*R*530/rho\n size = lc*slef.Params['mag'] / self.Params['ps']\n return size, lc\n\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n\n\n def getSize(self, size, numSplits):\n while True:\n if size[0] % numSplits == 0:\n break\n size[0] += 1\n return size[0]\n\n\n def fRes(self, mag, size, ps):\n \"\"\" Determines the change in spatial frequency across one pixel in F-space \"\"\"\n x = 2 * np.pi * mag / (size[0] * ps[0])\n y = 2 * np.pi * mag / (size[1] * ps[1])\n return [x, y]\n\n\nclass section():\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params['numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])\n self.subParams['fRApprox'][i] = 
self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\\\n self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)\n\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n\n I0 = cv.resize(img, (size[1], size[0]),\n interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image\n\n amplitude = np.sqrt(I0)\n\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0) # FI0.shape[0]\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n #print(\"LED:\", LED, \"LEDPixelPos:\", LEDPixelPos)\n #print(\"LEDPos:\", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = LEDPixelPos\n if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1\n return coords, isBF\n\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage():\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)\n\n\nclass subImage():\n def __init__(self, dir, splitSize, 
imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n\n\n\n\n########################################################################################################################\n'''\nclass preProcess(objective):\n def __init__(self, dir, file, size, line, colour=1):\n \"\"\" Slices images into sections \"\"\"\n super().__init__(dir, file, size, line, colour=1)\n numFiles, devisor = self.getDevisor(150)\n self.genFiles(numFiles)\n self.split(devisor, numFiles)\n\n\n def genFiles(self, numFiles):\n path = os.path.join(os.getcwd(), 'temp')\n if os.path.isdir(path):\n shutil.rmtree(path)\n time.sleep(0.01)\n os.mkdir(path)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), str(j))\n path1 = os.path.join(path, folder)\n os.mkdir(path1)\n\n\n def getDevisor(self, splitSize):\n imgName = self.images[0]\n img = self.readImage(self.dir, imgName)\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n devisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / devisor)\n return numFiles, devisor\n\n\n def split(self, devisor, numFiles):\n path0 = os.path.join(os.getcwd(), 'temp')\n for i0, file in enumerate(self.images):\n LED = self.getLED(file)\n img = self.readImage(self.dir, file)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), str(j))\n path1 = os.path.join(path0, folder)\n file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))\n path = os.path.join(path1, file)\n subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]\n cv.imwrite(path, subImg)\n h.progbar(i0 * numFiles ** 2 + i * numFiles + j,\n len(self.images) * numFiles ** 2, 'Slicing Images')\n\n\n\n def initCoords(self, dir):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n dirName = os.path.basename(dir)\n segmentPos = self.getSegment(dirName)\n N = len(os.listdir(dir))\n n = np.sqrt(N)\n w = self.smallSize[0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag\n coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)\n for i, img in enumerate(self.images):\n LED = self.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)\n coords[LED[0], LED[1]] = LEDPixelPos\n return coords\n'''",
"step-ids": [
9,
11,
13,
15,
19
]
}
|
[
9,
11,
13,
15,
19
] |
from locals import *
from random import choice, randint
import pygame
from gameobjects.vector2 import Vector2
from entity.block import Block
def loadImage(filename):
return pygame.image.load(filename).convert_alpha()
class MapGrid(object):
def __init__(self, world):
self.grid = []
self.images = map(lambda f: loadImage("images/" + f), [
"tree1.png",
"tree2.png",
"tree3.png",
"tree4.png",
"tree5.png",
"tree6.png",
"tree8.png",
"tree9.png",
"tree10.png"])
print self.images
for line_num in xrange(WORLD_SIZE[1]):
line = []
y = line_num * BLOCK_SIZE
for cell in xrange(WORLD_SIZE[0]):
on_edge = False
if cell==0 or cell==WORLD_SIZE[0]-1:
on_edge = True
if line_num==0 or line_num==WORLD_SIZE[1]-1:
on_edge = True
if on_edge or randint(0, 99) < 5:
x = cell * BLOCK_SIZE
block = Block(world, choice(self.images))
image_size = block.image.get_size()
block.location = Vector2(x+image_size[0]/2, y+BLOCK_SIZE)
line.append(block)
else:
line.append(None)
self.grid.append(line)
def getBlock(self, x, y):
if x<0 or x>=WORLD_SIZE[0] or y<0 or y>=WORLD_SIZE[1]:
return None
return self.grid[y][x]
def render(self, line_num, surface, offset):
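        # Cull to roughly the 12 columns around the camera offset so only
        # on-screen blocks are drawn for this row.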
start_index = min(int(offset.x-64) / BLOCK_SIZE, WORLD_SIZE[0])
start_index = max(0, start_index)
end_index = min(start_index + 12, WORLD_SIZE[0])
line = self.grid[line_num]
for cell in xrange(start_index, end_index):
if line[cell]:
line[cell].render(surface, offset)
|
normal
|
{
"blob_id": "2b8f4e0c86adfbf0d4ae57f32fa244eb088f2cee",
"index": 4773,
"step-1": "\nfrom locals import *\nfrom random import choice, randint\n\nimport pygame\n\nfrom gameobjects.vector2 import Vector2\n\nfrom entity.block import Block\n\ndef loadImage(filename):\n return pygame.image.load(filename).convert_alpha()\n\nclass MapGrid(object):\n def __init__(self, world):\n self.grid = []\n self.images = map(lambda f: loadImage(\"images/\" + f), [\n \"tree1.png\",\n \"tree2.png\",\n \"tree3.png\",\n \"tree4.png\",\n \"tree5.png\",\n \"tree6.png\",\n \"tree8.png\",\n \"tree9.png\",\n \"tree10.png\"])\n print self.images\n\n for line_num in xrange(WORLD_SIZE[1]):\n line = []\n y = line_num * BLOCK_SIZE\n for cell in xrange(WORLD_SIZE[0]):\n on_edge = False\n if cell==0 or cell==WORLD_SIZE[0]-1:\n on_edge = True\n if line_num==0 or line_num==WORLD_SIZE[1]-1:\n on_edge = True\n\n if on_edge or randint(0, 99) < 5:\n x = cell * BLOCK_SIZE\n block = Block(world, choice(self.images))\n image_size = block.image.get_size()\n block.location = Vector2(x+image_size[0]/2, y+BLOCK_SIZE)\n line.append(block)\n else:\n line.append(None)\n self.grid.append(line)\n \n def getBlock(self, x, y):\n if x<0 or x>=WORLD_SIZE[0] or y<0 or y>=WORLD_SIZE[1]:\n return None\n return self.grid[y][x]\n\n def render(self, line_num, surface, offset):\n start_index = min(int(offset.x-64) / BLOCK_SIZE, WORLD_SIZE[0])\n start_index = max(0, start_index)\n end_index = min(start_index + 12, WORLD_SIZE[0])\n line = self.grid[line_num]\n for cell in xrange(start_index, end_index):\n if line[cell]:\n line[cell].render(surface, offset)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from PIL import Image
from random import randrange
class PileMosaic:
def __init__(self):
self.width, self.height = 2380, 2800
self.filename = "pile_mosaic.png"
self.crema = (240, 233, 227)
self.choco = (89, 62, 53)
self.luna = (43, 97, 123)
self.latte = (195, 175, 148)
self.piscina = (170, 200, 211)
self.lavanda = (189, 192, 209)
self.viola = (133, 108, 140)
self.morado = (121, 69, 92)
self.rosa = (222, 179, 172)
self.flamingo = (238, 157, 140)
self.color_tuple = (self.crema, self.choco, self.luna, self.latte, self.piscina)
# self.color_tuple = (self.lavanda, self.viola, self.rosa, self.morado, self.flamingo)
self.tile_width = 300
self.tile_height = 100
def create_new_image(self):
self.image = Image.new("RGB", (self.width, self.height), "white")
self.data = [(255, 255, 255)]*(self.width*self.height)
def write_image(self):
self.image.save(self.filename, "PNG")
def hex_to_rgb(value):
value = value.lstrip('#')
lv = len(value)
return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
def place_pile(self, color, x=0, y=0):
for i in range(self.tile_width):
for j in range(self.tile_height):
self.image.im.putpixel((x + i, y + j), color)
def fill_random(self):
for x in range(self.width / self.tile_width):
for y in range(self.height / self.tile_height):
current_color = randrange(5)
self.place_pile(self.color_tuple[current_color], x=x*self.tile_width, y=y*self.tile_height)
def create_random_pattern(self):
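        # For each tile, choose a random color but exclude the colors of the
        # tiles directly above and to the left, so neighbouring tiles never match.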
initial_pattern = []
for x in range(self.width / self.tile_width):
initial_pattern.append([])
for y in range(self.height / self.tile_height):
temp_list = list(self.color_tuple)
if x - 1 >= 0:
try:
temp_list.remove(initial_pattern[x - 1][y])
except ValueError:
pass
if y - 1 >= 0:
try:
temp_list.remove(initial_pattern[x][y - 1])
except ValueError:
pass
initial_pattern[x].append(temp_list[randrange(len(temp_list))])
return initial_pattern
def fill(self, pattern):
for x in range(self.width / (self.tile_width + 4)):
for y in range(self.height / (self.tile_height + 4)):
self.place_pile(pattern[x][y], x=x*(self.tile_width+4), y=y*(self.tile_height+4))
pile = PileMosaic()
pile.create_new_image()
pile.fill(pile.create_random_pattern())
pile.write_image()
|
normal
|
{
"blob_id": "a484272ace089008e27f4e00d2e641118432665e",
"index": 4592,
"step-1": "<mask token>\n\n\nclass PileMosaic:\n\n def __init__(self):\n self.width, self.height = 2380, 2800\n self.filename = 'pile_mosaic.png'\n self.crema = 240, 233, 227\n self.choco = 89, 62, 53\n self.luna = 43, 97, 123\n self.latte = 195, 175, 148\n self.piscina = 170, 200, 211\n self.lavanda = 189, 192, 209\n self.viola = 133, 108, 140\n self.morado = 121, 69, 92\n self.rosa = 222, 179, 172\n self.flamingo = 238, 157, 140\n self.color_tuple = (self.crema, self.choco, self.luna, self.latte,\n self.piscina)\n self.tile_width = 300\n self.tile_height = 100\n\n def create_new_image(self):\n self.image = Image.new('RGB', (self.width, self.height), 'white')\n self.data = [(255, 255, 255)] * (self.width * self.height)\n <mask token>\n\n def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv //\n 3))\n\n def rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\n def place_pile(self, color, x=0, y=0):\n for i in range(self.tile_width):\n for j in range(self.tile_height):\n self.image.im.putpixel((x + i, y + j), color)\n\n def fill_random(self):\n for x in range(self.width / self.tile_width):\n for y in range(self.height / self.tile_height):\n current_color = randrange(5)\n self.place_pile(self.color_tuple[current_color], x=x * self\n .tile_width, y=y * self.tile_height)\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass PileMosaic:\n\n def __init__(self):\n self.width, self.height = 2380, 2800\n self.filename = 'pile_mosaic.png'\n self.crema = 240, 233, 227\n self.choco = 89, 62, 53\n self.luna = 43, 97, 123\n self.latte = 195, 175, 148\n self.piscina = 170, 200, 211\n self.lavanda = 189, 192, 209\n self.viola = 133, 108, 140\n self.morado = 121, 69, 92\n self.rosa = 222, 179, 172\n self.flamingo = 238, 157, 140\n self.color_tuple = (self.crema, self.choco, self.luna, self.latte,\n self.piscina)\n self.tile_width = 300\n self.tile_height = 100\n\n def create_new_image(self):\n self.image = Image.new('RGB', (self.width, self.height), 'white')\n self.data = [(255, 255, 255)] * (self.width * self.height)\n <mask token>\n\n def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv //\n 3))\n\n def rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\n def place_pile(self, color, x=0, y=0):\n for i in range(self.tile_width):\n for j in range(self.tile_height):\n self.image.im.putpixel((x + i, y + j), color)\n\n def fill_random(self):\n for x in range(self.width / self.tile_width):\n for y in range(self.height / self.tile_height):\n current_color = randrange(5)\n self.place_pile(self.color_tuple[current_color], x=x * self\n .tile_width, y=y * self.tile_height)\n\n def create_random_pattern(self):\n initial_pattern = []\n for x in range(self.width / self.tile_width):\n initial_pattern.append([])\n for y in range(self.height / self.tile_height):\n temp_list = list(self.color_tuple)\n if x - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x - 1][y])\n except ValueError:\n pass\n if y - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x][y - 1])\n except ValueError:\n pass\n initial_pattern[x].append(temp_list[randrange(len(temp_list))])\n return initial_pattern\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass PileMosaic:\n\n def __init__(self):\n self.width, self.height = 2380, 2800\n self.filename = 'pile_mosaic.png'\n self.crema = 240, 233, 227\n self.choco = 89, 62, 53\n self.luna = 43, 97, 123\n self.latte = 195, 175, 148\n self.piscina = 170, 200, 211\n self.lavanda = 189, 192, 209\n self.viola = 133, 108, 140\n self.morado = 121, 69, 92\n self.rosa = 222, 179, 172\n self.flamingo = 238, 157, 140\n self.color_tuple = (self.crema, self.choco, self.luna, self.latte,\n self.piscina)\n self.tile_width = 300\n self.tile_height = 100\n\n def create_new_image(self):\n self.image = Image.new('RGB', (self.width, self.height), 'white')\n self.data = [(255, 255, 255)] * (self.width * self.height)\n\n def write_image(self):\n self.image.save(self.filename, 'PNG')\n\n def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv //\n 3))\n\n def rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\n def place_pile(self, color, x=0, y=0):\n for i in range(self.tile_width):\n for j in range(self.tile_height):\n self.image.im.putpixel((x + i, y + j), color)\n\n def fill_random(self):\n for x in range(self.width / self.tile_width):\n for y in range(self.height / self.tile_height):\n current_color = randrange(5)\n self.place_pile(self.color_tuple[current_color], x=x * self\n .tile_width, y=y * self.tile_height)\n\n def create_random_pattern(self):\n initial_pattern = []\n for x in range(self.width / self.tile_width):\n initial_pattern.append([])\n for y in range(self.height / self.tile_height):\n temp_list = list(self.color_tuple)\n if x - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x - 1][y])\n except ValueError:\n pass\n if y - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x][y - 1])\n except ValueError:\n pass\n initial_pattern[x].append(temp_list[randrange(len(temp_list))])\n return initial_pattern\n\n def fill(self, pattern):\n for x in range(self.width / (self.tile_width + 4)):\n for y in range(self.height / (self.tile_height + 4)):\n self.place_pile(pattern[x][y], x=x * (self.tile_width + 4),\n y=y * (self.tile_height + 4))\n\n\npile = PileMosaic()\npile.create_new_image()\npile.fill(pile.create_random_pattern())\npile.write_image()\n",
"step-4": "from PIL import Image\nfrom random import randrange\n\n\nclass PileMosaic:\n\n def __init__(self):\n self.width, self.height = 2380, 2800\n self.filename = 'pile_mosaic.png'\n self.crema = 240, 233, 227\n self.choco = 89, 62, 53\n self.luna = 43, 97, 123\n self.latte = 195, 175, 148\n self.piscina = 170, 200, 211\n self.lavanda = 189, 192, 209\n self.viola = 133, 108, 140\n self.morado = 121, 69, 92\n self.rosa = 222, 179, 172\n self.flamingo = 238, 157, 140\n self.color_tuple = (self.crema, self.choco, self.luna, self.latte,\n self.piscina)\n self.tile_width = 300\n self.tile_height = 100\n\n def create_new_image(self):\n self.image = Image.new('RGB', (self.width, self.height), 'white')\n self.data = [(255, 255, 255)] * (self.width * self.height)\n\n def write_image(self):\n self.image.save(self.filename, 'PNG')\n\n def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv //\n 3))\n\n def rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\n def place_pile(self, color, x=0, y=0):\n for i in range(self.tile_width):\n for j in range(self.tile_height):\n self.image.im.putpixel((x + i, y + j), color)\n\n def fill_random(self):\n for x in range(self.width / self.tile_width):\n for y in range(self.height / self.tile_height):\n current_color = randrange(5)\n self.place_pile(self.color_tuple[current_color], x=x * self\n .tile_width, y=y * self.tile_height)\n\n def create_random_pattern(self):\n initial_pattern = []\n for x in range(self.width / self.tile_width):\n initial_pattern.append([])\n for y in range(self.height / self.tile_height):\n temp_list = list(self.color_tuple)\n if x - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x - 1][y])\n except ValueError:\n pass\n if y - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x][y - 1])\n except ValueError:\n pass\n initial_pattern[x].append(temp_list[randrange(len(temp_list))])\n return initial_pattern\n\n def fill(self, pattern):\n for x in range(self.width / (self.tile_width + 4)):\n for y in range(self.height / (self.tile_height + 4)):\n self.place_pile(pattern[x][y], x=x * (self.tile_width + 4),\n y=y * (self.tile_height + 4))\n\n\npile = PileMosaic()\npile.create_new_image()\npile.fill(pile.create_random_pattern())\npile.write_image()\n",
"step-5": "from PIL import Image\nfrom random import randrange\n\nclass PileMosaic:\n def __init__(self):\n self.width, self.height = 2380, 2800\n self.filename = \"pile_mosaic.png\"\n self.crema = (240, 233, 227)\n self.choco = (89, 62, 53)\n self.luna = (43, 97, 123)\n self.latte = (195, 175, 148)\n self.piscina = (170, 200, 211)\n self.lavanda = (189, 192, 209)\n self.viola = (133, 108, 140)\n self.morado = (121, 69, 92)\n self.rosa = (222, 179, 172)\n self.flamingo = (238, 157, 140)\n self.color_tuple = (self.crema, self.choco, self.luna, self.latte, self.piscina)\n # self.color_tuple = (self.lavanda, self.viola, self.rosa, self.morado, self.flamingo)\n self.tile_width = 300\n self.tile_height = 100\n\n def create_new_image(self):\n self.image = Image.new(\"RGB\", (self.width, self.height), \"white\")\n self.data = [(255, 255, 255)]*(self.width*self.height)\n\n def write_image(self):\n self.image.save(self.filename, \"PNG\")\n\n def hex_to_rgb(value):\n value = value.lstrip('#')\n lv = len(value)\n return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3))\n\n def rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n\n def place_pile(self, color, x=0, y=0):\n for i in range(self.tile_width):\n for j in range(self.tile_height):\n self.image.im.putpixel((x + i, y + j), color)\n\n def fill_random(self):\n for x in range(self.width / self.tile_width):\n for y in range(self.height / self.tile_height):\n current_color = randrange(5)\n self.place_pile(self.color_tuple[current_color], x=x*self.tile_width, y=y*self.tile_height)\n\n def create_random_pattern(self):\n initial_pattern = []\n for x in range(self.width / self.tile_width):\n initial_pattern.append([])\n for y in range(self.height / self.tile_height):\n temp_list = list(self.color_tuple)\n if x - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x - 1][y])\n except ValueError:\n pass\n if y - 1 >= 0:\n try:\n temp_list.remove(initial_pattern[x][y - 1])\n except ValueError:\n pass\n initial_pattern[x].append(temp_list[randrange(len(temp_list))])\n return initial_pattern\n \n def fill(self, pattern):\n for x in range(self.width / (self.tile_width + 4)):\n for y in range(self.height / (self.tile_height + 4)):\n self.place_pile(pattern[x][y], x=x*(self.tile_width+4), y=y*(self.tile_height+4))\n \n\npile = PileMosaic()\npile.create_new_image()\npile.fill(pile.create_random_pattern())\npile.write_image()\n",
"step-ids": [
7,
8,
12,
13,
14
]
}
|
[
7,
8,
12,
13,
14
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(my_randoms)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
my_randoms = random.sample(range(100), 10)
print(my_randoms)
<|reserved_special_token_1|>
import random
my_randoms = random.sample(range(100), 10)
print(my_randoms)
|
flexible
|
{
"blob_id": "d39f6fca80f32a4d13764eb5cfb29999785b1d16",
"index": 1629,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(my_randoms)\n",
"step-3": "<mask token>\nmy_randoms = random.sample(100, 10)\nprint(my_randoms)\n",
"step-4": "import random\nmy_randoms = random.sample(100, 10)\nprint(my_randoms)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('blog', '0013_auto_20191215_1619')]
operations = [migrations.AlterField(model_name='categorie', name=
'utimestamp', field=models.DateTimeField(default=datetime.datetime(
2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.
AlterField(model_name='post', name='create_date', field=models.
DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14,
657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name
='utimestamp', field=models.DateTimeField(default=datetime.datetime
(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]
<|reserved_special_token_1|>
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [('blog', '0013_auto_20191215_1619')]
operations = [migrations.AlterField(model_name='categorie', name=
'utimestamp', field=models.DateTimeField(default=datetime.datetime(
2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.
AlterField(model_name='post', name='create_date', field=models.
DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14,
657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name
='utimestamp', field=models.DateTimeField(default=datetime.datetime
(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]
<|reserved_special_token_1|>
# Generated by Django 3.0 on 2019-12-15 16:20
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('blog', '0013_auto_20191215_1619'),
]
operations = [
migrations.AlterField(
model_name='categorie',
name='utimestamp',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc)),
),
migrations.AlterField(
model_name='post',
name='create_date',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 657811, tzinfo=utc)),
),
migrations.AlterField(
model_name='tag',
name='utimestamp',
field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)),
),
]
|
flexible
|
{
"blob_id": "38a79f5b3ce1beb3dc1758880d42ceabc800ece7",
"index": 8818,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0013_auto_20191215_1619')]\n operations = [migrations.AlterField(model_name='categorie', name=\n 'utimestamp', field=models.DateTimeField(default=datetime.datetime(\n 2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.\n AlterField(model_name='post', name='create_date', field=models.\n DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, \n 657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name\n ='utimestamp', field=models.DateTimeField(default=datetime.datetime\n (2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]\n",
"step-4": "import datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0013_auto_20191215_1619')]\n operations = [migrations.AlterField(model_name='categorie', name=\n 'utimestamp', field=models.DateTimeField(default=datetime.datetime(\n 2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc))), migrations.\n AlterField(model_name='post', name='create_date', field=models.\n DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, \n 657811, tzinfo=utc))), migrations.AlterField(model_name='tag', name\n ='utimestamp', field=models.DateTimeField(default=datetime.datetime\n (2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)))]\n",
"step-5": "# Generated by Django 3.0 on 2019-12-15 16:20\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0013_auto_20191215_1619'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='categorie',\n name='utimestamp',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 660603, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='post',\n name='create_date',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 657811, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='tag',\n name='utimestamp',\n field=models.DateTimeField(default=datetime.datetime(2019, 12, 15, 16, 20, 14, 663436, tzinfo=utc)),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from flask import Blueprint, render_template
from bashtube import cache
singlevideos = Blueprint('singlevideos', __name__, template_folder='templates')
@singlevideos.route('/')
def index():
return render_template('singlevideos/single.html')
|
normal
|
{
"blob_id": "ee10bca1126b20378c4e9cea4d2dc7ed6a2044ab",
"index": 9187,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('singlevideos/single.html')\n",
"step-3": "<mask token>\nsinglevideos = Blueprint('singlevideos', __name__, template_folder='templates')\n\n\[email protected]('/')\ndef index():\n return render_template('singlevideos/single.html')\n",
"step-4": "from flask import Blueprint, render_template\nfrom bashtube import cache\nsinglevideos = Blueprint('singlevideos', __name__, template_folder='templates')\n\n\[email protected]('/')\ndef index():\n return render_template('singlevideos/single.html')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def plot_table(timestamps: dict, threadList: list, mList: list) ->None:
"""Plot standard deviation chart
Args:
k (list): Threads/Process used
deviation (list): Standard deviation of the timestamps
label (str): "Threads" or "Processos"
"""
plt.plot(threadList, timestamps.values(), 'o-')
plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(
0.5, 0.0, 0.5, 0.5))
plt.xlabel('Número de processos')
plt.ylabel('Tempo de Execução (s)')
plt.title('Tempo de Execução por Total de Processos e Valores')
plt.show()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_list(length: int) ->list:
"""Generate a list with given length with random integer values in the interval [0, length]
Args:
length (int): List length
Returns:
list: List generated with random values
"""
return [randint(0, length + 1) for _ in range(length)]
def plot_table(timestamps: dict, threadList: list, mList: list) ->None:
"""Plot standard deviation chart
Args:
k (list): Threads/Process used
deviation (list): Standard deviation of the timestamps
label (str): "Threads" or "Processos"
"""
plt.plot(threadList, timestamps.values(), 'o-')
plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(
0.5, 0.0, 0.5, 0.5))
plt.xlabel('Número de processos')
plt.ylabel('Tempo de Execução (s)')
plt.title('Tempo de Execução por Total de Processos e Valores')
plt.show()
<|reserved_special_token_1|>
from random import randint
import matplotlib.pyplot as plt
def generate_list(length: int) ->list:
"""Generate a list with given length with random integer values in the interval [0, length]
Args:
length (int): List length
Returns:
list: List generated with random values
"""
return [randint(0, length + 1) for _ in range(length)]
def plot_table(timestamps: dict, threadList: list, mList: list) ->None:
"""Plot standard deviation chart
Args:
k (list): Threads/Process used
deviation (list): Standard deviation of the timestamps
label (str): "Threads" or "Processos"
"""
plt.plot(threadList, timestamps.values(), 'o-')
plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(
0.5, 0.0, 0.5, 0.5))
plt.xlabel('Número de processos')
plt.ylabel('Tempo de Execução (s)')
plt.title('Tempo de Execução por Total de Processos e Valores')
plt.show()
<|reserved_special_token_1|>
from random import randint
import matplotlib.pyplot as plt
def generate_list(length: int) -> list:
"""Generate a list with given length with random integer values in the interval [0, length]
Args:
length (int): List length
Returns:
list: List generated with random values
"""
return [randint(0, length + 1) for _ in range(length)]
def plot_table(timestamps: dict, threadList: list, mList: list) -> None:
"""Plot standard deviation chart
Args:
k (list): Threads/Process used
deviation (list): Standard deviation of the timestamps
label (str): "Threads" or "Processos"
"""
plt.plot(threadList, timestamps.values(), 'o-')
plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))
plt.xlabel('Número de processos')
plt.ylabel('Tempo de Execução (s)')
plt.title('Tempo de Execução por Total de Processos e Valores')
plt.show()
|
flexible
|
{
"blob_id": "8804bfc5bed8b93e50279f0cbab561fe09d92a64",
"index": 6522,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-3": "<mask token>\n\n\ndef generate_list(length: int) ->list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n return [randint(0, length + 1) for _ in range(length)]\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-4": "from random import randint\nimport matplotlib.pyplot as plt\n\n\ndef generate_list(length: int) ->list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n return [randint(0, length + 1) for _ in range(length)]\n\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) ->None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title='Total valores', loc='best', bbox_to_anchor=(\n 0.5, 0.0, 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n",
"step-5": "from random import randint\nimport matplotlib.pyplot as plt\n\ndef generate_list(length: int) -> list:\n \"\"\"Generate a list with given length with random integer values in the interval [0, length]\n\n Args:\n length (int): List length\n\n Returns:\n list: List generated with random values\n \"\"\"\n\n return [randint(0, length + 1) for _ in range(length)]\n\ndef plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n \"\"\"Plot standard deviation chart\n\n Args:\n k (list): Threads/Process used\n deviation (list): Standard deviation of the timestamps\n label (str): \"Threads\" or \"Processos\"\n \"\"\"\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('twitter', '0002_tweet'),
]
operations = [
migrations.CreateModel(
name='TwitterKeys',
fields=[
('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
('consumer_key', models.CharField(max_length=200)),
('consumer_secret', models.CharField(max_length=200)),
('access_token', models.CharField(max_length=200)),
('access_token_secret', models.CharField(max_length=200)),
('user', models.ForeignKey(to='twitter.TwitterUser')),
],
),
]
|
normal
|
{
"blob_id": "c8406db010a506b782030c5d3f84c319851e89d6",
"index": 3662,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('twitter', '0002_tweet')]\n operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',\n models.AutoField(serialize=False, primary_key=True, auto_created=\n True, verbose_name='ID')), ('consumer_key', models.CharField(\n max_length=200)), ('consumer_secret', models.CharField(max_length=\n 200)), ('access_token', models.CharField(max_length=200)), (\n 'access_token_secret', models.CharField(max_length=200)), ('user',\n models.ForeignKey(to='twitter.TwitterUser'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('twitter', '0002_tweet')]\n operations = [migrations.CreateModel(name='TwitterKeys', fields=[('id',\n models.AutoField(serialize=False, primary_key=True, auto_created=\n True, verbose_name='ID')), ('consumer_key', models.CharField(\n max_length=200)), ('consumer_secret', models.CharField(max_length=\n 200)), ('access_token', models.CharField(max_length=200)), (\n 'access_token_secret', models.CharField(max_length=200)), ('user',\n models.ForeignKey(to='twitter.TwitterUser'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('twitter', '0002_tweet'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='TwitterKeys',\n fields=[\n ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),\n ('consumer_key', models.CharField(max_length=200)),\n ('consumer_secret', models.CharField(max_length=200)),\n ('access_token', models.CharField(max_length=200)),\n ('access_token_secret', models.CharField(max_length=200)),\n ('user', models.ForeignKey(to='twitter.TwitterUser')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
"""
复习
面向对象:考虑问题从对象的角度出发.
抽象:从多个事物中,舍弃个别的/非本质的特征(不重要),
抽出共性的本质(重要的)过程。
三大特征:
封装:将每个变化点单独分解到不同的类中。
例如:老张开车去东北
做法:定义人类,定义车类。
继承:重用现有类的功能和概念,并在此基础上进行扩展。
统一概念
例如:图形管理器,统计圆形/矩形.....面积。
做法:用图形类代表/约束,圆形/矩形..具有计算面积的方法.
多态:调用父"抽象的"方法,执行子类"具体的"方法.
重写:覆盖父类那个比较抽象的方法。
例如:图形管理器调用图形的计算面积方法
具体图形必须重写图形的计算面积方法。
继承是共性(计算面积),多态个性(长*宽 / pi *r**2)。
设计原则
开闭原则:允许增加新功能,不允许修改客户端代码.
单一职责:一个有且只有一个改变的原因.
依赖倒置:调用抽象(父),不要调用具体(子);
抽象不要依赖具体.
组合复用:如果仅仅是代码的复用,优先使用组合.
类与类关系
泛化[继承](做成爸爸)
关联(做成成员变量)
依赖(做成方法参数)
"""
|
flexible
|
{
"blob_id": "2749a262bf8da99aa340e878c15a6dba01acc38c",
"index": 7025,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n 复习\n 面向对象:考虑问题从对象的角度出发.\n 抽象:从多个事物中,舍弃个别的/非本质的特征(不重要),\n 抽出共性的本质(重要的)过程。\n 三大特征:\n 封装:将每个变化点单独分解到不同的类中。\n 例如:老张开车去东北\n 做法:定义人类,定义车类。\n\n 继承:重用现有类的功能和概念,并在此基础上进行扩展。\n 统一概念\n 例如:图形管理器,统计圆形/矩形.....面积。\n 做法:用图形类代表/约束,圆形/矩形..具有计算面积的方法.\n\n 多态:调用父\"抽象的\"方法,执行子类\"具体的\"方法.\n 重写:覆盖父类那个比较抽象的方法。\n 例如:图形管理器调用图形的计算面积方法\n 具体图形必须重写图形的计算面积方法。\n 继承是共性(计算面积),多态个性(长*宽 / pi *r**2)。\n\n 设计原则\n 开闭原则:允许增加新功能,不允许修改客户端代码.\n 单一职责:一个有且只有一个改变的原因.\n 依赖倒置:调用抽象(父),不要调用具体(子);\n 抽象不要依赖具体.\n 组合复用:如果仅仅是代码的复用,优先使用组合.\n\n 类与类关系\n 泛化[继承](做成爸爸)\n 关联(做成成员变量)\n 依赖(做成方法参数)\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
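The review above sketches a shape manager built on inheritance, polymorphism, and dependency inversion. A minimal runnable Python sketch of that example (the class and function names here are illustrative assumptions, not taken from the original text):

import math
from abc import ABC, abstractmethod

class Shape(ABC):
    """Abstract parent: the manager depends on this, never on a concrete shape."""

    @abstractmethod
    def area(self):
        ...

class Rectangle(Shape):
    def __init__(self, w, h):
        self.w, self.h = w, h

    def area(self):  # override: length * width
        return self.w * self.h

class Circle(Shape):
    def __init__(self, r):
        self.r = r

    def area(self):  # override: pi * r**2
        return math.pi * self.r ** 2

def total_area(shapes):
    """Shape manager: calls the abstract area(); each concrete override runs."""
    return sum(s.area() for s in shapes)

print(total_area([Rectangle(3, 4), Circle(1)]))  # 12 + pi, about 15.14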
import re
import os
import pandas as pd
instruments_file = os.path.abspath("instruments.csv")
input_names_file = os.path.abspath("names.txt")
output_names_file = os.path.abspath("names.csv")
inst_name_file = os.path.abspath("name_instrument.csv")
reg_ex = '; |, |\\*|\n'
name_header = ["first_name", "last_name"]
def process_names():
"""
Opening, reading name file and building name array.
"""
with open(input_names_file, 'r') as data:
plaintext = data.read()
name_array = plaintext.split('\n')
# Final name list
final_name_list = []
# Parsing different name formats and standardizing to create csv
for name in name_array:
if len(name.split(',')) == 2:
temp_name_list = re.split(reg_ex, name)
last_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(last_name + ',' + first_name)
elif len(name.split(' ')) == 2:
final_name_list.append(name.replace(' ', ','))
elif len(name.split(' ')) == 3:
temp_name_list = re.split(' ', name)
last_name = temp_name_list.pop()
middle_name = temp_name_list.pop()
first_name = temp_name_list.pop()
final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)
else:
final_name_list.append(name)
# Writing final name list to a file
with open(output_names_file, "w") as txt_file:
txt_file.write("first_name,last_name" + "\n")
for name in final_name_list:
txt_file.write(name + "\n") # works with any number of elements in a line
names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')
|
normal
|
{
"blob_id": "8c539dbbb762717393b9a71ddca8eb3872890854",
"index": 288,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-3": "<mask token>\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-4": "import re\nimport os\nimport pandas as pd\ninstruments_file = os.path.abspath('instruments.csv')\ninput_names_file = os.path.abspath('names.txt')\noutput_names_file = os.path.abspath('names.csv')\ninst_name_file = os.path.abspath('name_instrument.csv')\nreg_ex = '; |, |\\\\*|\\n'\nname_header = ['first_name', 'last_name']\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n final_name_list = []\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' +\n last_name)\n else:\n final_name_list.append(name)\n with open(output_names_file, 'w') as txt_file:\n txt_file.write('first_name,last_name' + '\\n')\n for name in final_name_list:\n txt_file.write(name + '\\n')\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',',\n engine='python')\n",
"step-5": "import re\nimport os\nimport pandas as pd\n\ninstruments_file = os.path.abspath(\"instruments.csv\")\ninput_names_file = os.path.abspath(\"names.txt\")\noutput_names_file = os.path.abspath(\"names.csv\")\ninst_name_file = os.path.abspath(\"name_instrument.csv\")\nreg_ex = '; |, |\\\\*|\\n'\nname_header = [\"first_name\", \"last_name\"]\n\n\ndef process_names():\n \"\"\"\n Opening, reading name file and building name array.\n \"\"\"\n with open(input_names_file, 'r') as data:\n plaintext = data.read()\n name_array = plaintext.split('\\n')\n\n # Final name list\n final_name_list = []\n\n # Parsing different name formats and standardizing to create csv\n for name in name_array:\n if len(name.split(',')) == 2:\n temp_name_list = re.split(reg_ex, name)\n last_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(last_name + ',' + first_name)\n elif len(name.split(' ')) == 2:\n final_name_list.append(name.replace(' ', ','))\n elif len(name.split(' ')) == 3:\n temp_name_list = re.split(' ', name)\n last_name = temp_name_list.pop()\n middle_name = temp_name_list.pop()\n first_name = temp_name_list.pop()\n final_name_list.append(first_name + ',' + middle_name + ' ' + last_name)\n else:\n final_name_list.append(name)\n\n # Writing final name list to a file\n with open(output_names_file, \"w\") as txt_file:\n txt_file.write(\"first_name,last_name\" + \"\\n\")\n for name in final_name_list:\n txt_file.write(name + \"\\n\") # works with any number of elements in a line\n\n names_df = pd.read_csv(output_names_file, names=name_header, sep=',', engine='python')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
This is the main script
"""
import datetime
import sqlite3
from sqlite3 import Error
import nltk.sentiment
from chatterbot import ChatBot
from pythonosc import udp_client
def _create_connection(db_file):
""" Create a database connection to the SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
# Create a new SQLite table
cur.execute("CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})"
.format(tn=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN,
time='time', ft='TEXT'))
except Error as err:
print(err)
finally:
conn.commit()
conn.close()
def _log_conversation(db_file, line):
""" Log conversation in SQLite database """
try:
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute("""INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES ("{v1}", "{v2}", "{now}")""".
format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time='time',
v1=' '.join(line.keys()), v2=' '.join(line.values()),
now=str(datetime.datetime.now())))
conn.commit()
except Error as err:
print(err)
finally:
conn.close()
def main(text):
"""This is the main function to run the CHATBOT, analyse
the responses with nltk and send OSC messages to Pure Data.
"""
# Get CHATBOT response from the user input.
bot_response = CHATBOT.get_response(text).text
print(bot_response)
    # Get polarity score from the user input.
    analysis = VADER_ANALYZER.polarity_scores(text)
    # Rescale the polarity score from [-1, 1] to the audible range [200, 800] Hz.
freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200
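    # e.g. compound = -1.0 -> 200 Hz, 0.0 -> 500 Hz, 1.0 -> 800 Hz.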
# Send OSC message, to be listened to by pd.
CLIENT.send_message("/filter", freq)
# Log conversation.
exchange = {text: bot_response}
_log_conversation("conversation.db", exchange)
if __name__ == '__main__':
# Set up database
TABLE_NAME = 'conversation_log'
INPUT_COLUMN = 'input_column'
OUTPUT_COLUMN = 'output_column'
CONVERSATION_DB = "conversation.db"
_create_connection(CONVERSATION_DB)
# Set up chatbot.
CHATBOT = ChatBot(
'Sentiment Music Bot',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer')
# Train based on the english corpus.
CHATBOT.train("chatterbot.corpus.english")
# Download lexicon for nltk.
nltk.download('vader_lexicon')
# Set up sentiment analyzer.
VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()
# Set up OSC client.
IP = 'localhost'
PORT = 9000
CLIENT = udp_client.SimpleUDPClient(IP, PORT)
# Run chatbot.
while True:
USER_RESPONSE = input("Talk ('exit' to exit): ")
if USER_RESPONSE == 'exit': # Exit on 'exit' string.
break
else:
main(USER_RESPONSE)
|
normal
|
{
"blob_id": "2b8b5b893d61d11d2795f5be96fde759256a15e8",
"index": 9741,
"step-1": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\n<mask token>\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\nif __name__ == '__main__':\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = 'conversation.db'\n _create_connection(CONVERSATION_DB)\n CHATBOT = ChatBot('Sentiment Music Bot', trainer=\n 'chatterbot.trainers.ChatterBotCorpusTrainer')\n CHATBOT.train('chatterbot.corpus.english')\n nltk.download('vader_lexicon')\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit':\n break\n else:\n main(USER_RESPONSE)\n",
"step-4": "<mask token>\nimport datetime\nimport sqlite3\nfrom sqlite3 import Error\nimport nltk.sentiment\nfrom chatterbot import ChatBot\nfrom pythonosc import udp_client\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute('CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})'.format(tn\n =TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN, time='time', ft\n ='TEXT'))\n except Error as err:\n print(err)\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\n 'INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")'\n .format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time=\n 'time', v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n except Error as err:\n print(err)\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n analysis = VADER_ANALYZER.polarity_scores(text)\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n CLIENT.send_message('/filter', freq)\n exchange = {text: bot_response}\n _log_conversation('conversation.db', exchange)\n\n\nif __name__ == '__main__':\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = 'conversation.db'\n _create_connection(CONVERSATION_DB)\n CHATBOT = ChatBot('Sentiment Music Bot', trainer=\n 'chatterbot.trainers.ChatterBotCorpusTrainer')\n CHATBOT.train('chatterbot.corpus.english')\n nltk.download('vader_lexicon')\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit':\n break\n else:\n main(USER_RESPONSE)\n",
"step-5": "\"\"\"\nThis is the main script\n\"\"\"\n\nimport datetime\nimport sqlite3\nfrom sqlite3 import Error\nimport nltk.sentiment\nfrom chatterbot import ChatBot\nfrom pythonosc import udp_client\n\n\ndef _create_connection(db_file):\n \"\"\" Create a database connection to the SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n\n # Create a new SQLite table\n cur.execute(\"CREATE TABLE {tn} ({r1}, {r2}, {time} {ft})\"\n .format(tn=TABLE_NAME, r1=INPUT_COLUMN, r2=OUTPUT_COLUMN,\n time='time', ft='TEXT'))\n\n except Error as err:\n print(err)\n\n finally:\n conn.commit()\n conn.close()\n\n\ndef _log_conversation(db_file, line):\n \"\"\" Log conversation in SQLite database \"\"\"\n try:\n conn = sqlite3.connect(db_file)\n cur = conn.cursor()\n cur.execute(\"\"\"INSERT INTO {tn} ({c1}, {c2}, {time}) VALUES (\"{v1}\", \"{v2}\", \"{now}\")\"\"\".\n format(tn=TABLE_NAME, c1=INPUT_COLUMN, c2=OUTPUT_COLUMN, time='time',\n v1=' '.join(line.keys()), v2=' '.join(line.values()),\n now=str(datetime.datetime.now())))\n conn.commit()\n\n except Error as err:\n print(err)\n\n finally:\n conn.close()\n\n\ndef main(text):\n \"\"\"This is the main function to run the CHATBOT, analyse\n the responses with nltk and send OSC messages to Pure Data.\n \"\"\"\n\n # Get CHATBOT response from the user input.\n bot_response = CHATBOT.get_response(text).text\n print(bot_response)\n\n # Get polarity score from CHATBOT response.\n analysis = VADER_ANALYZER.polarity_scores(text)\n\n # Change polarity score relatively to a audible frequency.\n freq = (analysis['compound'] - -1) / (1 - -1) * (800 - 200) + 200\n\n # Send OSC message, to be listened to by pd.\n CLIENT.send_message(\"/filter\", freq)\n\n # Log conversation.\n exchange = {text: bot_response}\n _log_conversation(\"conversation.db\", exchange)\n\n\nif __name__ == '__main__':\n\n # Set up database\n TABLE_NAME = 'conversation_log'\n INPUT_COLUMN = 'input_column'\n OUTPUT_COLUMN = 'output_column'\n CONVERSATION_DB = \"conversation.db\"\n _create_connection(CONVERSATION_DB)\n\n # Set up chatbot.\n CHATBOT = ChatBot(\n 'Sentiment Music Bot',\n trainer='chatterbot.trainers.ChatterBotCorpusTrainer')\n\n # Train based on the english corpus.\n CHATBOT.train(\"chatterbot.corpus.english\")\n\n # Download lexicon for nltk.\n nltk.download('vader_lexicon')\n\n # Set up sentiment analyzer.\n VADER_ANALYZER = nltk.sentiment.vader.SentimentIntensityAnalyzer()\n\n # Set up OSC client.\n IP = 'localhost'\n PORT = 9000\n CLIENT = udp_client.SimpleUDPClient(IP, PORT)\n\n # Run chatbot.\n while True:\n USER_RESPONSE = input(\"Talk ('exit' to exit): \")\n if USER_RESPONSE == 'exit': # Exit on 'exit' string.\n break\n else:\n main(USER_RESPONSE)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
class GraphNN(nn.Module):
def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):
super(GraphNN, self).__init__()
self.ligand_dim = dim_in
self.dim_h = dim_h
self.dim_act = dim_act
self.model_name = 'DockRLGraphNN'
self.bond_cutoff = 3.6
self.number_updates = 16
self.dropout = dropout
self.initialize_gnn()
self.reset()
my_params = self.get_params()
self.num_params = my_params.shape[0]
def initialize_gnn(self):
self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.
LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h,
self.ligand_dim + 2 * self.dim_h))
self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.
ligand_dim), ArcTan())
self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))
def get_distance(self, node_0, node_1):
return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))
def build_graph(self, x):
self.graph = torch.zeros(x.shape[0], x.shape[0])
for ii in range(x.shape[0]):
node_ii = x[ii, 0:3]
for jj in range(x.shape[0]):
node_jj = x[jj, 0:3]
distance = self.get_distance(node_ii, node_jj)
if distance <= self.bond_cutoff:
self.graph[ii, jj] = 1.0
self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))
def forward(self, x, return_codes=False, template=None):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
if template is not None:
self.build_graph(template.detach())
else:
self.build_graph(x.detach())
new_graph = torch.Tensor()
codes = torch.Tensor()
temp_input = [torch.Tensor()]
for kk in range(x.shape[0]):
for ll in range(x.shape[0]):
if self.graph[kk, ll]:
temp_input[-1] = torch.cat([temp_input[-1], self.
edge_model(x[ll]).unsqueeze(0)])
keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]
queries = temp_input[-1][:, -self.dim_h:]
attention = torch.zeros(1, keys.shape[0])
for mm in range(keys.shape[0]):
attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)
attention = torch.softmax(attention, dim=1)
my_input = torch.sum(attention.T * temp_input[-1][:, :self.
ligand_dim], dim=0)
my_input = torch.cat([x[kk], my_input])
codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])
new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])
if return_codes:
return codes, new_graph
else:
return new_graph
def get_actions(self, x):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
my_template = x
for ii in range(self.number_updates):
x = self.forward(x, template=my_template)
x = torch.mean(x, dim=0)
x = self.action_layer(x)
return x
def get_params(self):
params = np.array([])
for param in self.edge_model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.encoder.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.action_layer.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.edge_model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.encoder.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.action_layer.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MLP(nn.Module):
def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 32
self.dropout = dropout
self.model_name = 'DockRLMLP'
self.init_params()
def init_params(self):
self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.
ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.
Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = self.model(x)
return x
def get_actions(self, x):
act = self.forward(x)
act = torch.mean(act, dim=0, keepdim=True)
return act
def get_params(self):
params = np.array([])
for param in self.model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MRNN(nn.Module):
def __init__(self, dim_in=6, dim_act=5):
super(MRNN, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 8
self.init_params()
def init_params(self):
self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))
self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))
self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.
dim_h, self.dim_act))]))
self.cell_state = torch.zeros((1, self.dim_h))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = torch.cat((self.cell_state, x), axis=-1)
g_out = self.g(x)
j_out = (1.0 - g_out) * self.j(x)
self.cell_state = g_out * self.cell_state + j_out
y = self.w_h2y(self.cell_state)
return y
def get_action(self, x):
act = self.forward(x)
return act.detach().cpu().numpy()
def get_params(self):
params = np.array([])
for param in self.g.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.j.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.w_h2y.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.g.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.j.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.w_h2y.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
self.cell_state *= 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Params:
<|reserved_special_token_0|>
def init_params(self):
self.params = np.random.randn(self.dim_act)
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
<|reserved_special_token_0|>
def reset(self):
pass
class GraphNN(nn.Module):
def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):
super(GraphNN, self).__init__()
self.ligand_dim = dim_in
self.dim_h = dim_h
self.dim_act = dim_act
self.model_name = 'DockRLGraphNN'
self.bond_cutoff = 3.6
self.number_updates = 16
self.dropout = dropout
self.initialize_gnn()
self.reset()
my_params = self.get_params()
self.num_params = my_params.shape[0]
def initialize_gnn(self):
self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.
LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h,
self.ligand_dim + 2 * self.dim_h))
self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.
ligand_dim), ArcTan())
self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))
def get_distance(self, node_0, node_1):
return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))
def build_graph(self, x):
self.graph = torch.zeros(x.shape[0], x.shape[0])
for ii in range(x.shape[0]):
node_ii = x[ii, 0:3]
for jj in range(x.shape[0]):
node_jj = x[jj, 0:3]
distance = self.get_distance(node_ii, node_jj)
if distance <= self.bond_cutoff:
self.graph[ii, jj] = 1.0
self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))
def forward(self, x, return_codes=False, template=None):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
if template is not None:
self.build_graph(template.detach())
else:
self.build_graph(x.detach())
new_graph = torch.Tensor()
codes = torch.Tensor()
temp_input = [torch.Tensor()]
for kk in range(x.shape[0]):
for ll in range(x.shape[0]):
if self.graph[kk, ll]:
temp_input[-1] = torch.cat([temp_input[-1], self.
edge_model(x[ll]).unsqueeze(0)])
keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]
queries = temp_input[-1][:, -self.dim_h:]
attention = torch.zeros(1, keys.shape[0])
for mm in range(keys.shape[0]):
attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)
attention = torch.softmax(attention, dim=1)
my_input = torch.sum(attention.T * temp_input[-1][:, :self.
ligand_dim], dim=0)
my_input = torch.cat([x[kk], my_input])
codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])
new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])
if return_codes:
return codes, new_graph
else:
return new_graph
def get_actions(self, x):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
my_template = x
for ii in range(self.number_updates):
x = self.forward(x, template=my_template)
x = torch.mean(x, dim=0)
x = self.action_layer(x)
return x
def get_params(self):
params = np.array([])
for param in self.edge_model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.encoder.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.action_layer.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.edge_model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.encoder.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.action_layer.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MLP(nn.Module):
def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 32
self.dropout = dropout
self.model_name = 'DockRLMLP'
self.init_params()
def init_params(self):
self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.
ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.
Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = self.model(x)
return x
def get_actions(self, x):
act = self.forward(x)
act = torch.mean(act, dim=0, keepdim=True)
return act
def get_params(self):
params = np.array([])
for param in self.model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MRNN(nn.Module):
def __init__(self, dim_in=6, dim_act=5):
super(MRNN, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 8
self.init_params()
def init_params(self):
self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))
self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))
self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.
dim_h, self.dim_act))]))
self.cell_state = torch.zeros((1, self.dim_h))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = torch.cat((self.cell_state, x), axis=-1)
g_out = self.g(x)
j_out = (1.0 - g_out) * self.j(x)
self.cell_state = g_out * self.cell_state + j_out
y = self.w_h2y(self.cell_state)
return y
def get_action(self, x):
act = self.forward(x)
return act.detach().cpu().numpy()
def get_params(self):
params = np.array([])
for param in self.g.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.j.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.w_h2y.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.g.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.j.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.w_h2y.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
self.cell_state *= 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArcTan(nn.Module):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Params:
def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):
self.dim_act = dim_act
self.dim_in = 0
self.dim_h = 0
self.dropout = 0.0
self.model_name = 'DockRLParams'
self.init_params()
self.act = ArcTan()
def init_params(self):
self.params = np.random.randn(self.dim_act)
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
def set_params(self, params):
assert params.shape == self.params.shape
self.params = params
def reset(self):
pass
class GraphNN(nn.Module):
def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):
super(GraphNN, self).__init__()
self.ligand_dim = dim_in
self.dim_h = dim_h
self.dim_act = dim_act
self.model_name = 'DockRLGraphNN'
self.bond_cutoff = 3.6
self.number_updates = 16
self.dropout = dropout
self.initialize_gnn()
self.reset()
my_params = self.get_params()
self.num_params = my_params.shape[0]
def initialize_gnn(self):
self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.
LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h,
self.ligand_dim + 2 * self.dim_h))
self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.
ligand_dim), ArcTan())
self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))
def get_distance(self, node_0, node_1):
return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))
def build_graph(self, x):
self.graph = torch.zeros(x.shape[0], x.shape[0])
for ii in range(x.shape[0]):
node_ii = x[ii, 0:3]
for jj in range(x.shape[0]):
node_jj = x[jj, 0:3]
distance = self.get_distance(node_ii, node_jj)
if distance <= self.bond_cutoff:
self.graph[ii, jj] = 1.0
self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))
def forward(self, x, return_codes=False, template=None):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
if template is not None:
self.build_graph(template.detach())
else:
self.build_graph(x.detach())
new_graph = torch.Tensor()
codes = torch.Tensor()
temp_input = [torch.Tensor()]
for kk in range(x.shape[0]):
for ll in range(x.shape[0]):
if self.graph[kk, ll]:
temp_input[-1] = torch.cat([temp_input[-1], self.
edge_model(x[ll]).unsqueeze(0)])
keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]
queries = temp_input[-1][:, -self.dim_h:]
attention = torch.zeros(1, keys.shape[0])
for mm in range(keys.shape[0]):
attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)
attention = torch.softmax(attention, dim=1)
my_input = torch.sum(attention.T * temp_input[-1][:, :self.
ligand_dim], dim=0)
my_input = torch.cat([x[kk], my_input])
codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])
new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])
if return_codes:
return codes, new_graph
else:
return new_graph
def get_actions(self, x):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
my_template = x
for ii in range(self.number_updates):
x = self.forward(x, template=my_template)
x = torch.mean(x, dim=0)
x = self.action_layer(x)
return x
def get_params(self):
params = np.array([])
for param in self.edge_model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.encoder.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.action_layer.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.edge_model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.encoder.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.action_layer.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MLP(nn.Module):
def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 32
self.dropout = dropout
self.model_name = 'DockRLMLP'
self.init_params()
def init_params(self):
self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.
ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.
Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = self.model(x)
return x
def get_actions(self, x):
act = self.forward(x)
act = torch.mean(act, dim=0, keepdim=True)
return act
def get_params(self):
params = np.array([])
for param in self.model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MRNN(nn.Module):
def __init__(self, dim_in=6, dim_act=5):
super(MRNN, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 8
self.init_params()
def init_params(self):
self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))
self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))
self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.
dim_h, self.dim_act))]))
self.cell_state = torch.zeros((1, self.dim_h))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = torch.cat((self.cell_state, x), axis=-1)
g_out = self.g(x)
j_out = (1.0 - g_out) * self.j(x)
self.cell_state = g_out * self.cell_state + j_out
y = self.w_h2y(self.cell_state)
return y
def get_action(self, x):
act = self.forward(x)
return act.detach().cpu().numpy()
def get_params(self):
params = np.array([])
for param in self.g.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.j.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.w_h2y.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.g.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.j.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.w_h2y.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
self.cell_state *= 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ArcTan(nn.Module):
def __init__(self):
super(ArcTan, self).__init__()
<|reserved_special_token_0|>
class Params:
def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):
self.dim_act = dim_act
self.dim_in = 0
self.dim_h = 0
self.dropout = 0.0
self.model_name = 'DockRLParams'
self.init_params()
self.act = ArcTan()
def init_params(self):
self.params = np.random.randn(self.dim_act)
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
def set_params(self, params):
assert params.shape == self.params.shape
self.params = params
def reset(self):
pass
class GraphNN(nn.Module):
def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):
super(GraphNN, self).__init__()
self.ligand_dim = dim_in
self.dim_h = dim_h
self.dim_act = dim_act
self.model_name = 'DockRLGraphNN'
self.bond_cutoff = 3.6
self.number_updates = 16
self.dropout = dropout
self.initialize_gnn()
self.reset()
my_params = self.get_params()
self.num_params = my_params.shape[0]
def initialize_gnn(self):
self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.
LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h,
self.ligand_dim + 2 * self.dim_h))
self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.
ligand_dim), ArcTan())
self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.
dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))
def get_distance(self, node_0, node_1):
return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))
def build_graph(self, x):
self.graph = torch.zeros(x.shape[0], x.shape[0])
for ii in range(x.shape[0]):
node_ii = x[ii, 0:3]
for jj in range(x.shape[0]):
node_jj = x[jj, 0:3]
distance = self.get_distance(node_ii, node_jj)
if distance <= self.bond_cutoff:
self.graph[ii, jj] = 1.0
self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))
def forward(self, x, return_codes=False, template=None):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
if template is not None:
self.build_graph(template.detach())
else:
self.build_graph(x.detach())
new_graph = torch.Tensor()
codes = torch.Tensor()
temp_input = [torch.Tensor()]
for kk in range(x.shape[0]):
for ll in range(x.shape[0]):
if self.graph[kk, ll]:
temp_input[-1] = torch.cat([temp_input[-1], self.
edge_model(x[ll]).unsqueeze(0)])
keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]
queries = temp_input[-1][:, -self.dim_h:]
attention = torch.zeros(1, keys.shape[0])
for mm in range(keys.shape[0]):
attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)
attention = torch.softmax(attention, dim=1)
my_input = torch.sum(attention.T * temp_input[-1][:, :self.
ligand_dim], dim=0)
my_input = torch.cat([x[kk], my_input])
codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])
new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])
if return_codes:
return codes, new_graph
else:
return new_graph
def get_actions(self, x):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
my_template = x
for ii in range(self.number_updates):
x = self.forward(x, template=my_template)
x = torch.mean(x, dim=0)
x = self.action_layer(x)
return x
def get_params(self):
params = np.array([])
for param in self.edge_model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.encoder.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.action_layer.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.edge_model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.encoder.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.action_layer.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MLP(nn.Module):
def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 32
self.dropout = dropout
self.model_name = 'DockRLMLP'
self.init_params()
def init_params(self):
self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.
ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.
Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = self.model(x)
return x
def get_actions(self, x):
act = self.forward(x)
act = torch.mean(act, dim=0, keepdim=True)
return act
def get_params(self):
params = np.array([])
for param in self.model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.model.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
pass
class MRNN(nn.Module):
def __init__(self, dim_in=6, dim_act=5):
super(MRNN, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 8
self.init_params()
def init_params(self):
self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))
self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +
self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))
self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.
dim_h, self.dim_act))]))
self.cell_state = torch.zeros((1, self.dim_h))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = torch.cat((self.cell_state, x), axis=-1)
g_out = self.g(x)
j_out = (1.0 - g_out) * self.j(x)
self.cell_state = g_out * self.cell_state + j_out
y = self.w_h2y(self.cell_state)
return y
def get_action(self, x):
act = self.forward(x)
return act.detach().cpu().numpy()
def get_params(self):
params = np.array([])
for param in self.g.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.j.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.w_h2y.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
def set_params(self, my_params):
if my_params is None:
my_params = self.init_mean + torch.randn(self.num_params
) * torch.sqrt(torch.tensor(self.var))
param_start = 0
for name, param in self.g.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.j.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
for name, param in self.w_h2y.named_parameters():
param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
param[:] = torch.nn.Parameter(torch.Tensor(my_params[
param_start:param_stop].reshape(param.shape)))
def reset(self):
self.cell_state *= 0.0
<|reserved_special_token_0|>
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from functools import reduce
class ArcTan(nn.Module):
def __init__(self):
        super(ArcTan, self).__init__()
    def forward(self, x):
        # squash activations into (-1, 1); 1.5708 approximates pi / 2
        return torch.arctan(x) / 1.5708
class Params:
    def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):
        self.dim_act = dim_act
        # dim_in, dim_h, and dropout are accepted only so Params matches the
        # constructor signature of the nn.Module policies; this policy is a
        # bare parameter vector and does not use them
        self.dim_in = 0
        self.dim_h = 0
        self.dropout = 0.0
        self.model_name = "DockRLParams"
        self.init_params()
        self.act = ArcTan()
def init_params(self):
self.params = np.random.randn(self.dim_act)
self.num_params = self.dim_act
def forward(self, obs):
return self.get_params()
def get_params(self):
return self.params
def set_params(self, params):
assert params.shape == self.params.shape
self.params = params
def reset(self):
pass
class GraphNN(nn.Module):
def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.00):
super(GraphNN, self).__init__()
self.ligand_dim = dim_in
self.dim_h = dim_h
self.dim_act = dim_act
self.model_name = "DockRLGraphNN"
# This is a guesstimate based on:
# https://pymolwiki.org/index.php/Displaying_Biochemical_Properties
self.bond_cutoff = 3.6
self.number_updates = 16
self.dropout = dropout
self.initialize_gnn()
self.reset()
my_params = self.get_params()
self.num_params = my_params.shape[0]
def initialize_gnn(self):
# vertices MLP, with 8 element key and query vectors for self-attention
self.edge_model = nn.Sequential(\
nn.Linear(self.ligand_dim, self.dim_h),\
nn.LeakyReLU(),\
nn.Linear(self.dim_h, self.dim_h),\
nn.LeakyReLU(),\
nn.Dropout(p=self.dropout),\
nn.Linear(self.dim_h, self.ligand_dim + 2 * self.dim_h)
)
self.encoder = nn.Sequential(\
nn.Linear(2*self.ligand_dim, self.ligand_dim),\
ArcTan()
)
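        # readout head: maps the mean-pooled node code to the action vector
        # (see get_actions below)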
self.action_layer = nn.Sequential(\
nn.Linear(self.ligand_dim, self.dim_h),\
nn.LeakyReLU(),\
nn.Linear(self.dim_h, self.dim_act)\
)
    def get_distance(self, node_0, node_1):
        # Euclidean distance between two atom positions, used for the
        # bond-cutoff test in build_graph; summing the squared differences
        # before the square root gives the intended norm
        return torch.sqrt(torch.sum((node_0 - node_1) ** 2))
def build_graph(self, x):
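        # adjacency from pairwise distances: atoms closer than bond_cutoff
        # count as bonded; the eye() term at the end strips self-loops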
self.graph = torch.zeros(x.shape[0],x.shape[0])
for ii in range(x.shape[0]):
node_ii = x[ii, 0:3]
for jj in range(x.shape[0]):
node_jj = x[jj, 0:3]
distance = self.get_distance(node_ii, node_jj)
if distance <= self.bond_cutoff:
self.graph[ii, jj] = 1.0
self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))
def forward(self, x, return_codes=False, template=None):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
if template is not None:
self.build_graph(template.detach())
else:
self.build_graph(x.detach())
        new_graph = torch.Tensor()
        codes = torch.Tensor()
        temp_input = []
        for kk in range(x.shape[0]):
            # collect edge-model outputs for node kk's neighbors; a fresh
            # buffer per node restricts the attention below to the current
            # neighborhood
            temp_input.append(torch.Tensor())
            for ll in range(x.shape[0]):
                if self.graph[kk, ll]:
                    temp_input[-1] = torch.cat([temp_input[-1],
                            self.edge_model(x[ll]).unsqueeze(0)])
            # the last 2*dim_h elements of each edge output serve as key and
            # query vectors for a per-neighbor self-attention score
            keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]
            queries = temp_input[-1][:, -self.dim_h:]
            attention = torch.zeros(1, keys.shape[0])
            for mm in range(keys.shape[0]):
                attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)
            attention = torch.softmax(attention, dim=1)
            # attention-weighted sum of neighbor features, concatenated with
            # the node's own features before encoding
            my_input = torch.sum(attention.T
                    * temp_input[-1][:, :self.ligand_dim], dim=0)
            my_input = torch.cat([x[kk], my_input])
            # this is where the cell gating would happen (TODO)
            codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])
            new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])
if return_codes:
return codes, new_graph
else:
return new_graph
def get_actions(self, x):
if type(x) != torch.Tensor:
x = torch.Tensor(x)
my_template = x
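        # keep the raw input as the graph template so the bond topology
        # stays fixed while node features are updated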
for ii in range(self.number_updates):
x = self.forward(x, template=my_template)
x = torch.mean(x, dim=0)
x = self.action_layer(x)
return x
def get_params(self):
params = np.array([])
for param in self.edge_model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.encoder.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
# for param in self.decoder.named_parameters():
# params = np.append(params, param[1].detach().numpy().ravel())
for param in self.action_layer.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
    def set_params(self, my_params):
        if my_params is None:
            # init_mean and var are assumed to be supplied externally (e.g.
            # by an evolution-strategies wrapper); they are not defined here
            my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))
        # copy consecutive slices of the flat vector into each submodule;
        # param_start must advance past every consumed slice, and no_grad()
        # permits the in-place writes to leaf parameters
        param_start = 0
        with torch.no_grad():
            for name, param in self.edge_model.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
            for name, param in self.encoder.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
# for name, param in self.decoder.named_parameters():
#
# param_stop = param_start + reduce(lambda x,y: x*y, param.shape)
#
# param[:] = torch.nn.Parameter(torch.Tensor(\
# my_params[param_start:param_stop].reshape(param.shape)))
            for name, param in self.action_layer.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
def reset(self):
# initialize using gated cell states here later (maybe)
pass
class MLP(nn.Module):
def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
        self.dim_h = dim_h
self.dropout = dropout
self.model_name = "DockRLMLP"
self.init_params()
def init_params(self):
self.model = nn.Sequential(\
nn.Linear(self.dim_in, self.dim_h),\
nn.ReLU(),\
nn.Linear(self.dim_h, self.dim_h),\
nn.ReLU(),\
nn.Dropout(p=self.dropout),\
nn.Linear(self.dim_h, self.dim_act)\
)
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = self.model(x)
return x
def get_actions(self, x):
act = self.forward(x)
act = torch.mean(act, dim=0, keepdim=True)
return act
def get_params(self):
params = np.array([])
for param in self.model.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
    def set_params(self, my_params):
        if my_params is None:
            # init_mean and var are assumed to be supplied externally; they
            # are not defined in this class
            my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))
        param_start = 0
        with torch.no_grad():
            for name, param in self.model.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
def reset(self):
pass
class MRNN(nn.Module):
def __init__(self, dim_in=6, dim_act=5):
super(MRNN, self).__init__()
self.dim_in = dim_in
self.dim_act = dim_act
self.dim_h = 8
self.init_params()
def init_params(self):
self.g = nn.Sequential(OrderedDict([\
("g", nn.Linear(self.dim_h+self.dim_in, self.dim_h)),\
("act_g", nn.Sigmoid())]))
self.j = nn.Sequential(OrderedDict([\
("j", nn.Linear(self.dim_h+self.dim_in, self.dim_h)),\
("act_j", nn.Tanh())]))
self.w_h2y = nn.Sequential(OrderedDict([\
("w_h2y", nn.Linear(self.dim_h, self.dim_act))]))
self.cell_state = torch.zeros((1,self.dim_h))
self.num_params = self.get_params().shape[0]
def forward(self, x):
x = torch.Tensor(x)
if len(x.shape) == 1:
x = x.unsqueeze(0)
x = torch.cat((self.cell_state, x), axis=-1)
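        # minimal gated update: g is a sigmoid forget/update gate and j the
        # tanh candidate state it mixes into the cell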
g_out = self.g(x)
j_out = (1.0 - g_out) * self.j(x)
self.cell_state = g_out * self.cell_state + j_out
y = self.w_h2y(self.cell_state)
return y
def get_action(self, x):
act = self.forward(x)
return act.detach().cpu().numpy()
def get_params(self):
params = np.array([])
for param in self.g.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.j.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
for param in self.w_h2y.named_parameters():
params = np.append(params, param[1].detach().numpy().ravel())
return params
    def set_params(self, my_params):
        if my_params is None:
            # init_mean and var are assumed to be supplied externally; they
            # are not defined in this class
            my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))
        param_start = 0
        with torch.no_grad():
            for name, param in self.g.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
            for name, param in self.j.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
            for name, param in self.w_h2y.named_parameters():
                param_stop = param_start + reduce(lambda x, y: x * y, param.shape)
                param[:] = torch.Tensor(
                        my_params[param_start:param_stop].reshape(param.shape))
                param_start = param_stop
def reset(self):
self.cell_state *= 0.
if __name__ == "__main__":
mrnn = MRNN()
temp = mrnn.forward(np.random.randn(1,6))
print(temp)
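    # --- hedged smoke tests (added for illustration; the toy shapes and
    # values below are assumptions, not part of any original training setup)
    # GraphNN expects rows of per-atom features whose first three columns
    # are x/y/z coordinates; a random 4-atom "ligand" exercises the
    # message-passing and attention paths end to end
    gnn = GraphNN(dim_in=7, dim_act=6)
    toy_ligand = torch.randn(4, 7)
    print(gnn.get_actions(toy_ligand).shape)  # expected: torch.Size([6])
    # round-trip the flat parameter vector through get_params/set_params
    flat = gnn.get_params()
    gnn.set_params(flat)
    assert np.allclose(flat, gnn.get_params())
    # MLP averages its per-row outputs into a single action row
    mlp = MLP(dim_in=6, dim_act=5)
    print(mlp.get_actions(np.random.randn(3, 6)).shape)  # torch.Size([1, 5])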
|
flexible
|
{
"blob_id": "1c1673b5e54bafef9f36a2583115f8135c112ab4",
"index": 1922,
"step-1": "<mask token>\n\n\nclass GraphNN(nn.Module):\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):\n super(GraphNN, self).__init__()\n self.ligand_dim = dim_in\n self.dim_h = dim_h\n self.dim_act = dim_act\n self.model_name = 'DockRLGraphNN'\n self.bond_cutoff = 3.6\n self.number_updates = 16\n self.dropout = dropout\n self.initialize_gnn()\n self.reset()\n my_params = self.get_params()\n self.num_params = my_params.shape[0]\n\n def initialize_gnn(self):\n self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.\n LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h, \n self.ligand_dim + 2 * self.dim_h))\n self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.\n ligand_dim), ArcTan())\n self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))\n\n def get_distance(self, node_0, node_1):\n return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))\n\n def build_graph(self, x):\n self.graph = torch.zeros(x.shape[0], x.shape[0])\n for ii in range(x.shape[0]):\n node_ii = x[ii, 0:3]\n for jj in range(x.shape[0]):\n node_jj = x[jj, 0:3]\n distance = self.get_distance(node_ii, node_jj)\n if distance <= self.bond_cutoff:\n self.graph[ii, jj] = 1.0\n self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))\n\n def forward(self, x, return_codes=False, template=None):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n if template is not None:\n self.build_graph(template.detach())\n else:\n self.build_graph(x.detach())\n new_graph = torch.Tensor()\n codes = torch.Tensor()\n temp_input = [torch.Tensor()]\n for kk in range(x.shape[0]):\n for ll in range(x.shape[0]):\n if self.graph[kk, ll]:\n temp_input[-1] = torch.cat([temp_input[-1], self.\n edge_model(x[ll]).unsqueeze(0)])\n keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]\n queries = temp_input[-1][:, -self.dim_h:]\n attention = torch.zeros(1, keys.shape[0])\n for mm in range(keys.shape[0]):\n attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)\n attention = torch.softmax(attention, dim=1)\n my_input = torch.sum(attention.T * temp_input[-1][:, :self.\n ligand_dim], dim=0)\n my_input = torch.cat([x[kk], my_input])\n codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])\n new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])\n if return_codes:\n return codes, new_graph\n else:\n return new_graph\n\n def get_actions(self, x):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n my_template = x\n for ii in range(self.number_updates):\n x = self.forward(x, template=my_template)\n x = torch.mean(x, dim=0)\n x = self.action_layer(x)\n return x\n\n def get_params(self):\n params = np.array([])\n for param in self.edge_model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.encoder.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.action_layer.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.edge_model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n 
param_start:param_stop].reshape(param.shape)))\n for name, param in self.encoder.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.action_layer.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MLP(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):\n super(MLP, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 32\n self.dropout = dropout\n self.model_name = 'DockRLMLP'\n self.init_params()\n\n def init_params(self):\n self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.\n ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.\n Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = self.model(x)\n return x\n\n def get_actions(self, x):\n act = self.forward(x)\n act = torch.mean(act, dim=0, keepdim=True)\n return act\n\n def get_params(self):\n params = np.array([])\n for param in self.model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MRNN(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5):\n super(MRNN, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 8\n self.init_params()\n\n def init_params(self):\n self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))\n self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))\n self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.\n dim_h, self.dim_act))]))\n self.cell_state = torch.zeros((1, self.dim_h))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = torch.cat((self.cell_state, x), axis=-1)\n g_out = self.g(x)\n j_out = (1.0 - g_out) * self.j(x)\n self.cell_state = g_out * self.cell_state + j_out\n y = self.w_h2y(self.cell_state)\n return y\n\n def get_action(self, x):\n act = self.forward(x)\n return act.detach().cpu().numpy()\n\n def get_params(self):\n params = np.array([])\n for param in self.g.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.j.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.w_h2y.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in 
self.g.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.j.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.w_h2y.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n self.cell_state *= 0.0\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Params:\n <mask token>\n\n def init_params(self):\n self.params = np.random.randn(self.dim_act)\n self.num_params = self.dim_act\n\n def forward(self, obs):\n return self.get_params()\n\n def get_params(self):\n return self.params\n <mask token>\n\n def reset(self):\n pass\n\n\nclass GraphNN(nn.Module):\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):\n super(GraphNN, self).__init__()\n self.ligand_dim = dim_in\n self.dim_h = dim_h\n self.dim_act = dim_act\n self.model_name = 'DockRLGraphNN'\n self.bond_cutoff = 3.6\n self.number_updates = 16\n self.dropout = dropout\n self.initialize_gnn()\n self.reset()\n my_params = self.get_params()\n self.num_params = my_params.shape[0]\n\n def initialize_gnn(self):\n self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.\n LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h, \n self.ligand_dim + 2 * self.dim_h))\n self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.\n ligand_dim), ArcTan())\n self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))\n\n def get_distance(self, node_0, node_1):\n return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))\n\n def build_graph(self, x):\n self.graph = torch.zeros(x.shape[0], x.shape[0])\n for ii in range(x.shape[0]):\n node_ii = x[ii, 0:3]\n for jj in range(x.shape[0]):\n node_jj = x[jj, 0:3]\n distance = self.get_distance(node_ii, node_jj)\n if distance <= self.bond_cutoff:\n self.graph[ii, jj] = 1.0\n self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))\n\n def forward(self, x, return_codes=False, template=None):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n if template is not None:\n self.build_graph(template.detach())\n else:\n self.build_graph(x.detach())\n new_graph = torch.Tensor()\n codes = torch.Tensor()\n temp_input = [torch.Tensor()]\n for kk in range(x.shape[0]):\n for ll in range(x.shape[0]):\n if self.graph[kk, ll]:\n temp_input[-1] = torch.cat([temp_input[-1], self.\n edge_model(x[ll]).unsqueeze(0)])\n keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]\n queries = temp_input[-1][:, -self.dim_h:]\n attention = torch.zeros(1, keys.shape[0])\n for mm in range(keys.shape[0]):\n attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)\n attention = torch.softmax(attention, dim=1)\n my_input = torch.sum(attention.T * temp_input[-1][:, :self.\n ligand_dim], dim=0)\n my_input = torch.cat([x[kk], my_input])\n codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])\n new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])\n if return_codes:\n return codes, new_graph\n else:\n return new_graph\n\n def get_actions(self, x):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n my_template = x\n for ii in range(self.number_updates):\n x = self.forward(x, template=my_template)\n x = torch.mean(x, dim=0)\n x = self.action_layer(x)\n return x\n\n def get_params(self):\n params = np.array([])\n for param in self.edge_model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.encoder.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.action_layer.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + 
torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.edge_model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.encoder.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.action_layer.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MLP(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):\n super(MLP, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 32\n self.dropout = dropout\n self.model_name = 'DockRLMLP'\n self.init_params()\n\n def init_params(self):\n self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.\n ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.\n Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = self.model(x)\n return x\n\n def get_actions(self, x):\n act = self.forward(x)\n act = torch.mean(act, dim=0, keepdim=True)\n return act\n\n def get_params(self):\n params = np.array([])\n for param in self.model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MRNN(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5):\n super(MRNN, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 8\n self.init_params()\n\n def init_params(self):\n self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))\n self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))\n self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.\n dim_h, self.dim_act))]))\n self.cell_state = torch.zeros((1, self.dim_h))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = torch.cat((self.cell_state, x), axis=-1)\n g_out = self.g(x)\n j_out = (1.0 - g_out) * self.j(x)\n self.cell_state = g_out * self.cell_state + j_out\n y = self.w_h2y(self.cell_state)\n return y\n\n def get_action(self, x):\n act = self.forward(x)\n return act.detach().cpu().numpy()\n\n def get_params(self):\n params = np.array([])\n for param in self.g.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.j.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.w_h2y.named_parameters():\n params = 
np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.g.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.j.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.w_h2y.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n self.cell_state *= 0.0\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ArcTan(nn.Module):\n <mask token>\n <mask token>\n\n\nclass Params:\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):\n self.dim_act = dim_act\n self.dim_in = 0\n self.dim_h = 0\n self.dropout = 0.0\n self.model_name = 'DockRLParams'\n self.init_params()\n self.act = ArcTan()\n\n def init_params(self):\n self.params = np.random.randn(self.dim_act)\n self.num_params = self.dim_act\n\n def forward(self, obs):\n return self.get_params()\n\n def get_params(self):\n return self.params\n\n def set_params(self, params):\n assert params.shape == self.params.shape\n self.params = params\n\n def reset(self):\n pass\n\n\nclass GraphNN(nn.Module):\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):\n super(GraphNN, self).__init__()\n self.ligand_dim = dim_in\n self.dim_h = dim_h\n self.dim_act = dim_act\n self.model_name = 'DockRLGraphNN'\n self.bond_cutoff = 3.6\n self.number_updates = 16\n self.dropout = dropout\n self.initialize_gnn()\n self.reset()\n my_params = self.get_params()\n self.num_params = my_params.shape[0]\n\n def initialize_gnn(self):\n self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.\n LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h, \n self.ligand_dim + 2 * self.dim_h))\n self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.\n ligand_dim), ArcTan())\n self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))\n\n def get_distance(self, node_0, node_1):\n return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))\n\n def build_graph(self, x):\n self.graph = torch.zeros(x.shape[0], x.shape[0])\n for ii in range(x.shape[0]):\n node_ii = x[ii, 0:3]\n for jj in range(x.shape[0]):\n node_jj = x[jj, 0:3]\n distance = self.get_distance(node_ii, node_jj)\n if distance <= self.bond_cutoff:\n self.graph[ii, jj] = 1.0\n self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))\n\n def forward(self, x, return_codes=False, template=None):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n if template is not None:\n self.build_graph(template.detach())\n else:\n self.build_graph(x.detach())\n new_graph = torch.Tensor()\n codes = torch.Tensor()\n temp_input = [torch.Tensor()]\n for kk in range(x.shape[0]):\n for ll in range(x.shape[0]):\n if self.graph[kk, ll]:\n temp_input[-1] = torch.cat([temp_input[-1], self.\n edge_model(x[ll]).unsqueeze(0)])\n keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]\n queries = temp_input[-1][:, -self.dim_h:]\n attention = torch.zeros(1, keys.shape[0])\n for mm in range(keys.shape[0]):\n attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)\n attention = torch.softmax(attention, dim=1)\n my_input = torch.sum(attention.T * temp_input[-1][:, :self.\n ligand_dim], dim=0)\n my_input = torch.cat([x[kk], my_input])\n codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])\n new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])\n if return_codes:\n return codes, new_graph\n else:\n return new_graph\n\n def get_actions(self, x):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n my_template = x\n for ii in range(self.number_updates):\n x = self.forward(x, template=my_template)\n x = torch.mean(x, dim=0)\n x = self.action_layer(x)\n return x\n\n def get_params(self):\n params = np.array([])\n for param in self.edge_model.named_parameters():\n params = np.append(params, 
param[1].detach().numpy().ravel())\n for param in self.encoder.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.action_layer.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.edge_model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.encoder.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.action_layer.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MLP(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):\n super(MLP, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 32\n self.dropout = dropout\n self.model_name = 'DockRLMLP'\n self.init_params()\n\n def init_params(self):\n self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.\n ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.\n Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = self.model(x)\n return x\n\n def get_actions(self, x):\n act = self.forward(x)\n act = torch.mean(act, dim=0, keepdim=True)\n return act\n\n def get_params(self):\n params = np.array([])\n for param in self.model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MRNN(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5):\n super(MRNN, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 8\n self.init_params()\n\n def init_params(self):\n self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))\n self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))\n self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.\n dim_h, self.dim_act))]))\n self.cell_state = torch.zeros((1, self.dim_h))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = torch.cat((self.cell_state, x), axis=-1)\n g_out = self.g(x)\n j_out = (1.0 - g_out) * self.j(x)\n self.cell_state = g_out * self.cell_state + j_out\n y = self.w_h2y(self.cell_state)\n return y\n\n def get_action(self, x):\n act = 
self.forward(x)\n return act.detach().cpu().numpy()\n\n def get_params(self):\n params = np.array([])\n for param in self.g.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.j.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.w_h2y.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.g.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.j.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.w_h2y.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n self.cell_state *= 0.0\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass ArcTan(nn.Module):\n\n def __init__(self):\n super(ArcTan, self).__init__()\n <mask token>\n\n\nclass Params:\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):\n self.dim_act = dim_act\n self.dim_in = 0\n self.dim_h = 0\n self.dropout = 0.0\n self.model_name = 'DockRLParams'\n self.init_params()\n self.act = ArcTan()\n\n def init_params(self):\n self.params = np.random.randn(self.dim_act)\n self.num_params = self.dim_act\n\n def forward(self, obs):\n return self.get_params()\n\n def get_params(self):\n return self.params\n\n def set_params(self, params):\n assert params.shape == self.params.shape\n self.params = params\n\n def reset(self):\n pass\n\n\nclass GraphNN(nn.Module):\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.0):\n super(GraphNN, self).__init__()\n self.ligand_dim = dim_in\n self.dim_h = dim_h\n self.dim_act = dim_act\n self.model_name = 'DockRLGraphNN'\n self.bond_cutoff = 3.6\n self.number_updates = 16\n self.dropout = dropout\n self.initialize_gnn()\n self.reset()\n my_params = self.get_params()\n self.num_params = my_params.shape[0]\n\n def initialize_gnn(self):\n self.edge_model = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_h), nn.\n LeakyReLU(), nn.Dropout(p=self.dropout), nn.Linear(self.dim_h, \n self.ligand_dim + 2 * self.dim_h))\n self.encoder = nn.Sequential(nn.Linear(2 * self.ligand_dim, self.\n ligand_dim), ArcTan())\n self.action_layer = nn.Sequential(nn.Linear(self.ligand_dim, self.\n dim_h), nn.LeakyReLU(), nn.Linear(self.dim_h, self.dim_act))\n\n def get_distance(self, node_0, node_1):\n return torch.sum(torch.sqrt(torch.abs(node_0 - node_1) ** 2))\n\n def build_graph(self, x):\n self.graph = torch.zeros(x.shape[0], x.shape[0])\n for ii in range(x.shape[0]):\n node_ii = x[ii, 0:3]\n for jj in range(x.shape[0]):\n node_jj = x[jj, 0:3]\n distance = self.get_distance(node_ii, node_jj)\n if distance <= self.bond_cutoff:\n self.graph[ii, jj] = 1.0\n self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))\n\n def forward(self, x, return_codes=False, template=None):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n if template is not None:\n self.build_graph(template.detach())\n else:\n self.build_graph(x.detach())\n new_graph = torch.Tensor()\n codes = torch.Tensor()\n temp_input = [torch.Tensor()]\n for kk in range(x.shape[0]):\n for ll in range(x.shape[0]):\n if self.graph[kk, ll]:\n temp_input[-1] = torch.cat([temp_input[-1], self.\n edge_model(x[ll]).unsqueeze(0)])\n keys = temp_input[-1][:, -self.dim_h * 2:-self.dim_h]\n queries = temp_input[-1][:, -self.dim_h:]\n attention = torch.zeros(1, keys.shape[0])\n for mm in range(keys.shape[0]):\n attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)\n attention = torch.softmax(attention, dim=1)\n my_input = torch.sum(attention.T * temp_input[-1][:, :self.\n ligand_dim], dim=0)\n my_input = torch.cat([x[kk], my_input])\n codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])\n new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])\n if return_codes:\n return codes, new_graph\n else:\n return new_graph\n\n def get_actions(self, x):\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n my_template = x\n for ii in range(self.number_updates):\n x = self.forward(x, template=my_template)\n x = torch.mean(x, dim=0)\n x = self.action_layer(x)\n return x\n\n def get_params(self):\n params = np.array([])\n for param in self.edge_model.named_parameters():\n params = 
np.append(params, param[1].detach().numpy().ravel())\n for param in self.encoder.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.action_layer.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.edge_model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.encoder.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.action_layer.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MLP(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):\n super(MLP, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 32\n self.dropout = dropout\n self.model_name = 'DockRLMLP'\n self.init_params()\n\n def init_params(self):\n self.model = nn.Sequential(nn.Linear(self.dim_in, self.dim_h), nn.\n ReLU(), nn.Linear(self.dim_h, self.dim_h), nn.ReLU(), nn.\n Dropout(p=self.dropout), nn.Linear(self.dim_h, self.dim_act))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = self.model(x)\n return x\n\n def get_actions(self, x):\n act = self.forward(x)\n act = torch.mean(act, dim=0, keepdim=True)\n return act\n\n def get_params(self):\n params = np.array([])\n for param in self.model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.model.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\n\nclass MRNN(nn.Module):\n\n def __init__(self, dim_in=6, dim_act=5):\n super(MRNN, self).__init__()\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 8\n self.init_params()\n\n def init_params(self):\n self.g = nn.Sequential(OrderedDict([('g', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_g', nn.Sigmoid())]))\n self.j = nn.Sequential(OrderedDict([('j', nn.Linear(self.dim_h +\n self.dim_in, self.dim_h)), ('act_j', nn.Tanh())]))\n self.w_h2y = nn.Sequential(OrderedDict([('w_h2y', nn.Linear(self.\n dim_h, self.dim_act))]))\n self.cell_state = torch.zeros((1, self.dim_h))\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n x = torch.Tensor(x)\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n x = torch.cat((self.cell_state, x), axis=-1)\n g_out = self.g(x)\n j_out = (1.0 - g_out) * self.j(x)\n self.cell_state = g_out * self.cell_state + j_out\n y = self.w_h2y(self.cell_state)\n return y\n\n def get_action(self, x):\n 
act = self.forward(x)\n return act.detach().cpu().numpy()\n\n def get_params(self):\n params = np.array([])\n for param in self.g.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.j.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n for param in self.w_h2y.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n return params\n\n def set_params(self, my_params):\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params\n ) * torch.sqrt(torch.tensor(self.var))\n param_start = 0\n for name, param in self.g.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.j.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n for name, param in self.w_h2y.named_parameters():\n param_stop = param_start + reduce(lambda x, y: x * y, param.shape)\n param[:] = torch.nn.Parameter(torch.Tensor(my_params[\n param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n self.cell_state *= 0.0\n\n\n<mask token>\n",
"step-5": "import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport numpy as np\n\nfrom collections import OrderedDict\nfrom functools import reduce\n\nclass ArcTan(nn.Module):\n\n def __init__(self):\n super(ArcTan,self).__init__()\n\n def forward(self, x):\n\n return torch.arctan(x) / 1.5708\n\nclass Params():\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=0, dropout=0.0):\n \n self.dim_act = dim_act\n self.dim_in = 0\n self.dim_h = 0\n self.dropout = 0.0\n self.model_name = \"DockRLParams\"\n\n self.init_params()\n self.act = ArcTan()\n\n def init_params(self):\n\n self.params = np.random.randn(self.dim_act)\n self.num_params = self.dim_act\n\n def forward(self, obs):\n return self.get_params()\n\n def get_params(self):\n return self.params\n\n def set_params(self, params):\n assert params.shape == self.params.shape\n\n self.params = params \n\n def reset(self):\n pass\n\n\n\nclass GraphNN(nn.Module):\n\n def __init__(self, dim_in=7, dim_act=6, dim_h=8, dropout=0.00):\n super(GraphNN, self).__init__()\n \n self.ligand_dim = dim_in\n self.dim_h = dim_h\n self.dim_act = dim_act\n self.model_name = \"DockRLGraphNN\"\n # This is a guesstimate based on: \n # https://pymolwiki.org/index.php/Displaying_Biochemical_Properties\n self.bond_cutoff = 3.6\n self.number_updates = 16\n self.dropout = dropout\n\n self.initialize_gnn()\n self.reset()\n\n my_params = self.get_params()\n self.num_params = my_params.shape[0]\n\n def initialize_gnn(self):\n\n # vertices MLP, with 8 element key and query vectors for self-attention\n self.edge_model = nn.Sequential(\\\n nn.Linear(self.ligand_dim, self.dim_h),\\\n nn.LeakyReLU(),\\\n nn.Linear(self.dim_h, self.dim_h),\\\n nn.LeakyReLU(),\\\n nn.Dropout(p=self.dropout),\\\n nn.Linear(self.dim_h, self.ligand_dim + 2 * self.dim_h)\n )\n\n self.encoder = nn.Sequential(\\\n nn.Linear(2*self.ligand_dim, self.ligand_dim),\\\n ArcTan()\n )\n\n self.action_layer = nn.Sequential(\\\n nn.Linear(self.ligand_dim, self.dim_h),\\\n nn.LeakyReLU(),\\\n nn.Linear(self.dim_h, self.dim_act)\\\n )\n \n def get_distance(self, node_0, node_1):\n\n return torch.sum(torch.sqrt(torch.abs(node_0 - node_1)**2))\n\n def build_graph(self, x):\n\n self.graph = torch.zeros(x.shape[0],x.shape[0])\n\n for ii in range(x.shape[0]):\n node_ii = x[ii, 0:3]\n for jj in range(x.shape[0]):\n node_jj = x[jj, 0:3]\n\n distance = self.get_distance(node_ii, node_jj)\n if distance <= self.bond_cutoff:\n self.graph[ii, jj] = 1.0\n \n self.graph = self.graph * (1 - torch.eye(self.graph.shape[0]))\n\n def forward(self, x, return_codes=False, template=None):\n\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n\n if template is not None:\n self.build_graph(template.detach())\n else:\n self.build_graph(x.detach())\n \n new_graph = torch.Tensor() #torch.zeros_like(x)\n codes = torch.Tensor() #torch.zeros(x.shape[0], self.dim_h)\n temp_input = [torch.Tensor()] \n #orch.Tensor() #torch.zeros(x.shape[0], self.dim_h+8+8)\n\n\n for kk in range(x.shape[0]):\n # loop through nodes for each node\n for ll in range(x.shape[0]):\n if self.graph[kk,ll]:\n temp_input[-1] = torch.cat([temp_input[-1],\\\n self.edge_model(x[ll]).unsqueeze(0)])\n\n keys = temp_input[-1][:,-self.dim_h*2:-self.dim_h]\n queries = temp_input[-1][:,-self.dim_h:]\n\n attention = torch.zeros(1, keys.shape[0])\n\n for mm in range(keys.shape[0]):\n attention[:, mm] = torch.matmul(queries[mm], keys[mm].T)\n\n attention = torch.softmax(attention, dim=1)\n\n my_input = torch.sum(attention.T \\\n * 
temp_input[-1][:,:self.ligand_dim],dim=0)\n my_input = torch.cat([x[kk], my_input])\n\n #this is where the cell gating would happen (TODO)\n codes = torch.cat([codes, self.encoder(my_input).unsqueeze(0)])\n\n new_graph = torch.cat([new_graph, codes[-1].unsqueeze(0)])\n #self.decoder(codes[-1]).unsqueeze(0)])\n\n\n if return_codes:\n return codes, new_graph\n else:\n return new_graph\n\n\n def get_actions(self, x):\n\n if type(x) != torch.Tensor:\n x = torch.Tensor(x)\n\n my_template = x\n\n for ii in range(self.number_updates):\n x = self.forward(x, template=my_template)\n\n x = torch.mean(x, dim=0)\n\n x = self.action_layer(x)\n\n return x\n\n def get_params(self):\n params = np.array([])\n\n for param in self.edge_model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n for param in self.encoder.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n# for param in self.decoder.named_parameters():\n# params = np.append(params, param[1].detach().numpy().ravel())\n\n for param in self.action_layer.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n return params\n\n def set_params(self, my_params):\n\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))\n\n param_start = 0\n for name, param in self.edge_model.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n for name, param in self.encoder.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n# for name, param in self.decoder.named_parameters():\n#\n# param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n#\n# param[:] = torch.nn.Parameter(torch.Tensor(\\\n# my_params[param_start:param_stop].reshape(param.shape)))\n\n for name, param in self.action_layer.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n # initialize using gated cell states here later (maybe)\n pass\n\nclass MLP(nn.Module):\n def __init__(self, dim_in=6, dim_act=5, dim_h=32, dropout=0.0):\n super(MLP, self).__init__()\n\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 32\n self.dropout = dropout\n self.model_name = \"DockRLMLP\"\n\n self.init_params()\n\n def init_params(self):\n\n self.model = nn.Sequential(\\\n nn.Linear(self.dim_in, self.dim_h),\\\n nn.ReLU(),\\\n nn.Linear(self.dim_h, self.dim_h),\\\n nn.ReLU(),\\\n nn.Dropout(p=self.dropout),\\\n nn.Linear(self.dim_h, self.dim_act)\\\n )\n\n self.num_params = self.get_params().shape[0]\n\n def forward(self, x):\n\n x = torch.Tensor(x)\n\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n\n\n x = self.model(x)\n\n\n return x\n\n def get_actions(self, x):\n\n act = self.forward(x)\n act = torch.mean(act, dim=0, keepdim=True)\n return act\n\n def get_params(self):\n params = np.array([])\n\n for param in self.model.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n return params\n\n def set_params(self, my_params):\n\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))\n\n 
param_start = 0\n for name, param in self.model.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n pass\n\nclass MRNN(nn.Module):\n def __init__(self, dim_in=6, dim_act=5):\n super(MRNN, self).__init__()\n\n self.dim_in = dim_in\n self.dim_act = dim_act\n self.dim_h = 8\n\n self.init_params()\n\n\n def init_params(self):\n\n self.g = nn.Sequential(OrderedDict([\\\n (\"g\", nn.Linear(self.dim_h+self.dim_in, self.dim_h)),\\\n (\"act_g\", nn.Sigmoid())]))\n\n self.j = nn.Sequential(OrderedDict([\\\n (\"j\", nn.Linear(self.dim_h+self.dim_in, self.dim_h)),\\\n (\"act_j\", nn.Tanh())]))\n\n self.w_h2y = nn.Sequential(OrderedDict([\\\n (\"w_h2y\", nn.Linear(self.dim_h, self.dim_act))]))\n\n self.cell_state = torch.zeros((1,self.dim_h))\n\n self.num_params = self.get_params().shape[0]\n \n def forward(self, x):\n \n x = torch.Tensor(x)\n\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n\n x = torch.cat((self.cell_state, x), axis=-1)\n\n g_out = self.g(x) \n\n j_out = (1.0 - g_out) * self.j(x)\n\n self.cell_state = g_out * self.cell_state + j_out\n\n y = self.w_h2y(self.cell_state) \n\n return y\n \n def get_action(self, x):\n\n act = self.forward(x)\n return act.detach().cpu().numpy()\n\n def get_params(self):\n params = np.array([])\n\n for param in self.g.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n for param in self.j.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n for param in self.w_h2y.named_parameters():\n params = np.append(params, param[1].detach().numpy().ravel())\n\n return params\n\n def set_params(self, my_params):\n\n if my_params is None:\n my_params = self.init_mean + torch.randn(self.num_params) * torch.sqrt(torch.tensor(self.var))\n\n param_start = 0\n for name, param in self.g.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n for name, param in self.j.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n for name, param in self.w_h2y.named_parameters():\n\n param_stop = param_start + reduce(lambda x,y: x*y, param.shape)\n\n param[:] = torch.nn.Parameter(torch.Tensor(\\\n my_params[param_start:param_stop].reshape(param.shape)))\n\n def reset(self):\n self.cell_state *= 0. \n\n\nif __name__ == \"__main__\":\n\n mrnn = MRNN()\n\n temp = mrnn.forward(np.random.randn(1,6))\n print(temp)\n",
"step-ids": [
26,
31,
34,
35,
39
]
}
|
[
26,
31,
34,
35,
39
] |
#17219
tot, inp = map(int, input().split())
ID_dict = {}
for _ in range(tot):
id, pw = map(str, input().split())
ID_dict[id] = pw
for _ in range(inp):
print(ID_dict[input()])
|
normal
|
{
"blob_id": "cf7556034020d88ddb6b71b9f908c905e2f03cdb",
"index": 4076,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\nfor _ in range(inp):\n print(ID_dict[input()])\n",
"step-3": "tot, inp = map(int, input().split())\nID_dict = {}\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\nfor _ in range(inp):\n print(ID_dict[input()])\n",
"step-4": "#17219\ntot, inp = map(int, input().split())\nID_dict = {}\n\nfor _ in range(tot):\n id, pw = map(str, input().split())\n ID_dict[id] = pw\n\nfor _ in range(inp):\n print(ID_dict[input()])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<mask token>
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
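    # Example collectd.conf snippet wiring this callback (a sketch; the
    # module path and credentials are placeholders, not part of this plugin):
    #
    #   <Plugin python>
    #     ModulePath "/usr/lib/collectd/python"
    #     Import "vcenter"
    #     <Module vcenter>
    #       Vcenter "vc1.example.com vc2.example.com"
    #       Username "monitor"
    #       Password "secret"
    #       Verbose false
    #       Sleep 30
    #     </Module>
    #   </Plugin>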
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
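        # Note: sleeping inside the read callback throttles how quickly
        # collectd can invoke the next poll; the delay comes from the
        # Sleep config key parsed in configure().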
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
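    # For example, dispatch('esx01', 'host_cpu_percent', 'esx01', 42.0)
    # emits a gauge under plugin "vCenter" with host "esx01",
    # plugin_instance "esx01" and type_instance "host_cpu_percent".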
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
<|reserved_special_token_1|>
<mask token>
class Collector(object):
    <mask token>
    <mask token>
    <mask token>
    <mask token>
    <mask token>
    <mask token>
    <mask token>
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
        cpu_total = cpu_count * cpu_mhz_per_core
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100
        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
<|reserved_special_token_1|>
<mask token>
class Collector(object):
def __init__(self, vcenters, username=None, password=None, verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
        except Exception:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {'datastore': {}, 'datacenter': {}}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
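        # Shape of the returned mapping (a sketch):
        #   {'datastore': {'ds1': {...}}, 'datacenter': {'dc1': {...}}}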
return stats
    <mask token>
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
elif 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
if group_type == 'datacenter':
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
        elif group_type == 'cluster':
            find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':
mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,
'vms_running': vms_running, 'vms_stopped': vms_stopped,
child_type: child_stats}
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
        cpu_total = cpu_count * cpu_mhz_per_core
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100
        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
<|reserved_special_token_1|>
<mask token>
class Collector(object):
def __init__(self, vcenters, username=None, password=None, verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
        except Exception:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {'datastore': {}, 'datacenter': {}}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
def poll_datastore(self, server, obj, name):
"""
Gather metrics about a specific datastore.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datastore.
:param str name:
Name of the datastore.
:returns:
A dictionary with four keys: capacity, free, used, and usage. The
capacity, free, and used space are measured in megabytes while the
usage is a percentage.
"""
capacity = free = usage = 0
try:
self.log.debug('query datastore %s' % (name,))
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.capacity', 'summary.freeSpace'], from_node
=obj, obj_type='Datastore')
for ps in props:
for prop in ps.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.capacity':
capacity = pv / MB
elif pn == 'summary.freeSpace':
free = pv / MB
        except Exception:
self.log.exception('Failed to get datastore metrics')
if capacity > 0:
usage = (capacity - free) / float(capacity) * 100
return {'capacity': capacity, 'free': free, 'used': capacity - free,
'usage': usage}
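    # Worked example: a datastore reporting capacity 1048576 MB with
    # 262144 MB free yields used = 786432 MB and
    # usage = (1048576 - 262144) / 1048576 * 100 = 75.0 percent.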
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
elif 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
return stats
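    # Merge rule illustrated: counters such as vms_total are summed across
    # the datacenter and cluster passes, while *_percent values are
    # averaged, e.g. (40 + 60) / 2 = 50.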
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
if group_type == 'datacenter':
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
        elif group_type == 'cluster':
            find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':
mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,
'vms_running': vms_running, 'vms_stopped': vms_stopped,
child_type: child_stats}
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name', 'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize', 'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
        cpu_total = cpu_count * cpu_mhz_per_core
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100
        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100
stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':
cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,
'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':
mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,
'vms_stopped': vms_stopped}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
super(CollectdCollector, self).__init__(*args, **kwargs)
self.sleep_time = kwargs.get('sleep_time', 20)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
for vcenter, data in info.items():
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
for dc_name, dc_data in data['datacenter'].items():
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {logging.CRITICAL: collectd.error, logging.
ERROR: collectd.error, logging.WARN: collectd.warning,
logging.INFO: collectd.info, logging.DEBUG: collectd.info}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        if record.levelno == logging.DEBUG and not self.verbose:
            return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
<mask token>
# collectd-vcenter - vcenter.py
#
# Author : Loic Lambiel @ exoscale
# Contributor : Josh VanderLinden
# Description : This is a collectd python module to gather stats from Vmware
# vcenter
import logging
import ssl
import time
from pysphere import VIServer
try:
import collectd
COLLECTD_ENABLED = True
except ImportError:
COLLECTD_ENABLED = False
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
MB = 1024 ** 2
HOST_STATUS = ('green', 'gray', 'yellow', 'red')
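# poll_host() reports overallStatus via HOST_STATUS.index(value), i.e. as a
# small integer gauge: 0=green, 1=gray, 2=yellow, 3=red.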
class Collector(object):
def __init__(self, vcenters, username=None, password=None,
verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
            Number of seconds to wait between polls (consumed by the
            CollectdCollector subclass, not by Collector itself).
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
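    # A minimal standalone sketch (not part of the original module; hostname
    # and credentials are placeholders):
    #
    #     c = Collector(['vc.example.com'], username='monitor',
    #                   password='secret', verbose=True)
    #     all_stats = c.poll()
    #     print(all_stats['vc.example.com']['datacenter'])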
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
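    # The returned mapping is keyed by vCenter hostname, e.g. (illustrative):
    #
    #     {'vc.example.com': {'datastore': {...}, 'datacenter': {...}}}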
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
        except Exception:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {
'datastore': {},
'datacenter': {},
}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
def poll_datastore(self, server, obj, name):
"""
Gather metrics about a specific datastore.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datastore.
:param str name:
Name of the datastore.
:returns:
A dictionary with four keys: capacity, free, used, and usage. The
capacity, free, and used space are measured in megabytes while the
usage is a percentage.
"""
capacity = free = usage = 0
try:
self.log.debug('query datastore %s' % (name,))
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.capacity',
'summary.freeSpace',
], from_node=obj, obj_type='Datastore')
for ps in props:
for prop in ps.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.capacity':
capacity = pv / MB
elif pn == 'summary.freeSpace':
free = pv / MB
        except Exception:
self.log.exception('Failed to get datastore metrics')
if capacity > 0:
usage = (capacity - free) / float(capacity) * 100
return {
'capacity': capacity,
'free': free,
'used': capacity - free,
'usage': usage,
}
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
            elif 'percent' in key:
                # percentages cannot be summed; average the datacenter-level
                # and cluster-level values instead
                stats[key] = (stats[key] + value) / 2
            else:
                stats[key] += value
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
# change collection behavior based on the type of group we're dealing
# with
if group_type == 'datacenter':
# find each cluster in the datacenter
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
elif group_type == 'cluster':
            # find each host in the cluster
            find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
# initialize some metrics
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
# iterate over each child node in this object group
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
# aggregate data from each child to the top level
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
# recalculate percentages
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
# return the current metrics for this group
group_stats = {
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
child_type: child_stats,
}
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
        # strip the domain suffix unless the name looks like an IPv4 address
        if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize',
'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz',
], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
        cpu_total = cpu_count * cpu_mhz_per_core
        # guard against hosts that report zero totals (e.g. disconnected)
        if cpu_total > 0:
            cpu_percent = cpu_usage / float(cpu_total) * 100
        if mem_total > 0:
            mem_percent = mem_usage / float(mem_total) * 100
stats = {
'status': status,
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'cpu_count': cpu_count,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
    def __init__(self, *args, **kwargs):
        # pop sleep_time before delegating; Collector.__init__ does not
        # accept it
        self.sleep_time = kwargs.pop('sleep_time', 20)
        super(CollectdCollector, self).__init__(*args, **kwargs)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
                self.log.warning('Unknown config key: %s' % (key,))
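    # configure() consumes a collectd <Module> block; a plausible collectd.conf
    # snippet (module name and values are illustrative):
    #
    #     <Plugin python>
    #         Import "vcenter"
    #         <Module vcenter>
    #             Vcenter "vc1.example.com vc2.example.com"
    #             Username "monitor"
    #             Password "secret"
    #             Verbose true
    #             Sleep 20
    #         </Module>
    #     </Plugin>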
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
            self.log.warning('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
# report information for all vCenter servers
for vcenter, data in info.items():
# report datastore information
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
# report datacenter information
for dc_name, dc_data in data['datacenter'].items():
# extract any cluster and host information for later processing
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
# report cluster information
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
# report host information
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
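    # Under collectd's naming scheme the value dispatched above surfaces as
    # <host>/vCenter-<obj_instance>/gauge-<obj_type>, e.g. (illustrative)
    # vc.example.com/vCenter-datastore1/gauge-ds_usage.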
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {
logging.CRITICAL: collectd.error,
logging.ERROR: collectd.error,
logging.WARN: collectd.warning,
logging.INFO: collectd.info,
logging.DEBUG: collectd.info,
}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        # LogRecord exposes the numeric level as 'levelno'
        if record.levelno == logging.DEBUG and not self.verbose:
            return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
if COLLECTD_ENABLED:
instance = CollectdCollector([])
collectd.register_config(instance.configure)
collectd.register_read(instance.read)
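# Importing this module under collectd's python plugin registers the callbacks
# above automatically; outside collectd (COLLECTD_ENABLED is False) the import
# is a no-op and Collector can be driven manually as sketched earlier.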
|
flexible
|
{
"blob_id": "55f76ae1ffe0fb2d2ca2c7a20aab45ffb00cf178",
"index": 613,
"step-1": "<mask token>\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Collector(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n 
for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n <mask token>\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. 
This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric 
sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. 
The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n capacity = free = usage = 0\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.capacity', 'summary.freeSpace'], from_node\n =obj, obj_type='Datastore')\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n return {'capacity': capacity, 'free': free, 'used': capacity - free,\n 'usage': usage}\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric 
sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-5": "# collectd-vcenter - vcenter.py\n#\n# Author : Loic Lambiel @ exoscale\n# Contributor : Josh VanderLinden\n# Description : This is a collectd python module to gather stats from Vmware\n# vcenter\n\nimport logging\nimport ssl\nimport time\n\nfrom pysphere import VIServer\n\ntry:\n import collectd\n COLLECTD_ENABLED = True\nexcept ImportError:\n COLLECTD_ENABLED = False\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nMB = 1024 ** 2\nHOST_STATUS = ('green', 'gray', 'yellow', 'red')\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None,\n verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n\n stats = {\n 'datastore': {},\n 'datacenter': {},\n }\n\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. 
The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n\n capacity = free = usage = 0\n\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.capacity',\n 'summary.freeSpace',\n ], from_node=obj, obj_type='Datastore')\n\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n\n return {\n 'capacity': capacity,\n 'free': free,\n 'used': capacity - free,\n 'usage': usage,\n }\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n\n if '.' in name:\n name = name.split('.')[0]\n\n stats = self._poll_group('datacenter', server, obj, name)\n\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n else:\n if 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n\n # change collection behavior based on the type of group we're dealing\n # with\n if group_type == 'datacenter':\n # find each cluster in the datacenter\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n # find each host in the datacenter or cluster\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n\n # initialize some metrics\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n\n # iterate over each child node in this object group\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n\n # aggregate data from each child to the top level\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n\n # recalculate percentages\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n\n # return the current metrics for this group\n group_stats = {\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n child_type: child_stats,\n }\n\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n\n self.log.debug('Beginning read callback')\n info = self.poll()\n\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n\n # report information for all vCenter servers\n for vcenter, data in info.items():\n # report datastore information\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n\n # report datacenter information\n for dc_name, dc_data in data['datacenter'].items():\n # extract any cluster and host information for later processing\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n\n # report cluster information\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n\n for 
ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n\n # report host information\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n\n if COLLECTD_ENABLED:\n self._handler_map = {\n logging.CRITICAL: collectd.error,\n logging.ERROR: collectd.error,\n logging.WARN: collectd.warning,\n logging.INFO: collectd.info,\n logging.DEBUG: collectd.info,\n }\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n\n if record.level == logging.DEBUG and not self.verbose:\n return\n\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\nif COLLECTD_ENABLED:\n instance = CollectdCollector([])\n\n collectd.register_config(instance.configure)\n collectd.register_read(instance.read)\n",
"step-ids": [
11,
13,
19,
20,
24
]
}
|
[
11,
13,
19,
20,
24
] |
#!/usr/bin/env python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft, Intel Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import abc
import itertools
import os
import uuid
from enum import Enum
from pathlib import Path
from typing import Dict, Optional, Sequence, Tuple, Union
import numpy as np
import onnx
from onnx import ModelProto, TensorProto, helper, numpy_helper
import onnxruntime
from .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution
class TensorData:
_allowed = frozenset(["avg", "std", "lowest", "highest", "hist", "hist_edges"])
def __init__(self, **kwargs):
for k, v in kwargs.items():
if k not in TensorData._allowed:
raise ValueError(f"Unexpected value {k!r} not in {TensorData._allowed}.")
setattr(self, k, v)
@property
def range_value(self):
if not hasattr(self, "lowest") or not hasattr(self, "highest"):
raise AttributeError(f"Attributes 'lowest' and/or 'highest' missing in {dir(self)}.")
return (self.lowest, self.highest)
@property
def avg_std(self):
if not hasattr(self, "avg") or not hasattr(self, "std"):
raise AttributeError(f"Attributes 'avg' and/or 'std' missing in {dir(self)}.")
return (self.avg, self.std)
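# A minimal sketch of how TensorData is constructed and queried; the numeric
# values below are purely illustrative.
def _example_tensor_data():
    td = TensorData(lowest=-1.5, highest=2.0, avg=0.1, std=0.7)
    lo, hi = td.range_value  # (-1.5, 2.0)
    avg, std = td.avg_std  # (0.1, 0.7)
    return lo, hi, avg, std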
class TensorsData:
def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):
self.calibration_method = calibration_method
self.data = {}
for k, v in data.items():
if not isinstance(k, str):
raise TypeError(f"Keys must be strings not {type(k)}.")
if isinstance(v, tuple):
if calibration_method == CalibrationMethod.MinMax and len(v) == 2:
self.data[k] = TensorData(lowest=v[0], highest=v[1])
continue
if len(v) == 4:
                    self.data[k] = TensorData(lowest=v[0], highest=v[1], hist=v[2], hist_edges=v[3])
continue
raise TypeError(f"Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.")
if not isinstance(v, TensorData):
raise TypeError(f"Values must be TensorData not {type(v)}.")
self.data[k] = v
def __iter__(self):
yield from self.data
def __contains__(self, key):
return key in self.data
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
if key not in self.data:
raise RuntimeError(f"Only an existing tensor can be modified, {key!r} is not.")
self.data[key] = value
def values(self):
return self.data.values()
class CalibrationMethod(Enum):
MinMax = 0
Entropy = 1
Percentile = 2
Distribution = 3
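# A minimal sketch showing how raw (min, max) tuples from a MinMax calibration
# pass become a TensorsData container; the tensor names are made up.
def _example_tensors_data():
    ranges = TensorsData(
        CalibrationMethod.MinMax,
        {"conv1_output": (-0.5, 3.2), "fc1_output": (-1.0, 1.0)},
    )
    for name in ranges:  # iterates over tensor names
        lo, hi = ranges[name].range_value
        print(name, lo, hi)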
class CalibrationDataReader(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return hasattr(subclass, "get_next") and callable(subclass.get_next) or NotImplemented
@abc.abstractmethod
def get_next(self) -> dict:
"""generate the input data dict for ONNXinferenceSession run"""
raise NotImplementedError
def __iter__(self):
return self
def __next__(self):
result = self.get_next()
if result is None:
raise StopIteration
return result
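# A minimal sketch of a concrete CalibrationDataReader: it replays a fixed
# list of input dicts and returns None when exhausted, which is the signal
# collect_data() uses to stop. The input name "input" is an assumption and
# must match the model's actual graph input.
class _ListDataReader(CalibrationDataReader):
    def __init__(self, batches):
        self._batches = iter(batches)
    def get_next(self) -> dict:
        return next(self._batches, None)
# e.g. _ListDataReader([{"input": np.zeros((1, 3, 224, 224), dtype=np.float32)}])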
class CalibraterBase:
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
symmetric=False,
use_external_data_format=False,
):
"""
:param model_path: ONNX model to calibrate. It should be a model file path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param symmetric: make range of tensor symmetric (central point is 0).
:param use_external_data_format: use external data format to store model which size is >= 2Gb
"""
if isinstance(model_path, str):
self.model = load_model_with_shape_infer(Path(model_path))
elif isinstance(model_path, Path):
self.model = load_model_with_shape_infer(model_path)
else:
raise ValueError("model_path should be model path.")
self.op_types_to_calibrate = op_types_to_calibrate
self.augmented_model_path = augmented_model_path
self.symmetric = symmetric
self.use_external_data_format = use_external_data_format
self.augment_model = None
self.infer_session = None
self.execution_providers = ["CPUExecutionProvider"]
def set_execution_providers(self, execution_providers=["CPUExecutionProvider"]): # noqa: B006
"""
reset the execution providers to execute the collect_data. It triggers to re-creating inference session.
"""
self.execution_providers = execution_providers
self.create_inference_session()
def create_inference_session(self):
"""
create an OnnxRuntime InferenceSession.
"""
sess_options = onnxruntime.SessionOptions()
sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
self.infer_session = onnxruntime.InferenceSession(
self.augmented_model_path,
sess_options=sess_options,
providers=self.execution_providers,
)
def select_tensors_to_calibrate(self, model: ModelProto):
"""
select input/output tensors of candidate nodes to calibrate.
returns:
tensors (set): set of tensor name.
value_infos (dict): tensor name to value info.
"""
value_infos = {vi.name: vi for vi in model.graph.value_info}
value_infos.update({ot.name: ot for ot in model.graph.output})
value_infos.update({it.name: it for it in model.graph.input})
initializer = {init.name for init in model.graph.initializer}
tensors_to_calibrate = set()
tensor_type_to_calibrate = {TensorProto.FLOAT}
for node in model.graph.node:
if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:
for tensor_name in itertools.chain(node.input, node.output):
if tensor_name in value_infos:
vi = value_infos[tensor_name]
if (
vi.type.HasField("tensor_type")
and (vi.type.tensor_type.elem_type in tensor_type_to_calibrate)
and (tensor_name not in initializer)
):
tensors_to_calibrate.add(tensor_name)
return tensors_to_calibrate, value_infos
def get_augment_model(self):
"""
return: augmented onnx model. Call after calling augment_graph
"""
return self.model
def augment_graph(self):
"""
abstract method: augment the input model to prepare for collecting data. It will:
1. augment the model to be able to collect desired statistics data
        2. save augmented model to augmented_model_path
"""
raise NotImplementedError
def collect_data(self, data_reader: CalibrationDataReader):
"""
abstract method: collect the tensors that will be used for range computation. It can be called multiple times.
"""
raise NotImplementedError
def compute_data(self) -> TensorsData:
"""
abstract method: compute data based on the calibration method stored in TensorsData
"""
raise NotImplementedError
class MinMaxCalibrater(CalibraterBase):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
symmetric=False,
use_external_data_format=False,
moving_average=False,
averaging_constant=0.01,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param symmetric: make range of tensor symmetric (central point is 0).
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.
:param averaging_constant: constant smoothing factor to use when computing the moving average.
"""
super().__init__(
model_path,
op_types_to_calibrate=op_types_to_calibrate,
augmented_model_path=augmented_model_path,
symmetric=symmetric,
use_external_data_format=use_external_data_format,
)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = {output.name for output in self.model.graph.output}
self.moving_average = moving_average
if moving_average and (averaging_constant < 0 or averaging_constant > 1):
raise ValueError("Invalid averaging constant, which should not be < 0 or > 1.")
self.averaging_constant = averaging_constant
def augment_graph(self):
"""
Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in
model and ensures their outputs are stored as part of the graph output
:return: augmented ONNX model
"""
tensors, _ = self.select_tensors_to_calibrate(self.model)
reshape_shape_name = str(uuid.uuid4())
reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)
self.model.graph.initializer.append(reshape_shape)
def add_reduce_min_max(tensor_name, reduce_op_name):
            # When doing ReduceMax/ReduceMin, ORT can't reduce on a dim with value of 0 if 'keepdims' is false.
            # To keep the code simple, we always set keepdims to 1.
keepdims = 1
# Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)
reduce_output = tensor_name + "_" + reduce_op_name
intermediate_output = reduce_output + "_Reshape"
reduce_node = onnx.helper.make_node(
reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output
)
reshape_node = onnx.helper.make_node(
"Reshape",
inputs=[intermediate_output, reshape_shape_name],
outputs=[reduce_output],
name=intermediate_output,
)
self.model.graph.node.extend([reduce_node, reshape_node])
self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))
for tensor in tensors:
add_reduce_min_max(tensor, "ReduceMin")
add_reduce_min_max(tensor, "ReduceMax")
onnx.save(
self.model,
self.augmented_model_path,
save_as_external_data=self.use_external_data_format,
)
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
t = self.compute_data()
if not isinstance(t, TensorsData):
raise TypeError(f"compute_data must return a TensorsData not {type(t)}.")
self.clear_collected_data()
def merge_range(self, old_range, new_range):
if not old_range:
return new_range
for key, value in old_range.items():
if self.moving_average:
min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])
max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])
else:
min_value = min(value[0], new_range[key][0])
max_value = max(value[1], new_range[key][1])
new_range[key] = (min_value, max_value)
return new_range
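    # Worked example (illustrative values) of merge_range with
    # moving_average=True and averaging_constant=0.01:
    #   old range (-1.0, 2.0), new batch range (-3.0, 4.0)
    #   min: -1.0 + 0.01 * (-3.0 - (-1.0)) = -1.02
    #   max:  2.0 + 0.01 * ( 4.0 -   2.0 ) =  2.02
    # With moving_average=False the merged range is simply (-3.0, 4.0).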
def compute_data(self) -> TensorsData:
"""
Compute the min-max range of tensor
:return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }
"""
if len(self.intermediate_outputs) == 0:
return self.calibrate_tensors_range
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_output_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_output_dict.setdefault(k, []).append(v)
added_output_names = output_names[self.num_model_outputs :]
calibrate_tensor_names = [
added_output_names[i].rpartition("_")[0] for i in range(0, len(added_output_names), 2)
] # output names
merged_added_output_dict = {
i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs
}
pairs = []
for i in range(0, len(added_output_names), 2):
min_value = 0
max_value = 0
if self.moving_average:
min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)
max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)
else:
min_value_array = min(merged_added_output_dict[added_output_names[i]])
max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])
            if isinstance(min_value_array, int) or min_value_array.size > 0:
                min_value = float(min_value_array)
            if isinstance(max_value_array, int) or max_value_array.size > 0:
                max_value = float(max_value_array)
if self.symmetric:
max_absolute_value = max(abs(min_value), abs(max_value))
pairs.append(tuple([-max_absolute_value, max_absolute_value]))
else:
pairs.append(tuple([min_value, max_value]))
new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))
if self.calibrate_tensors_range:
self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)
else:
self.calibrate_tensors_range = new_calibrate_tensors_range
return self.calibrate_tensors_range
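# A minimal end-to-end sketch for MinMaxCalibrater; model_path and data_reader
# are placeholders supplied by the caller.
def _example_min_max_calibration(model_path, data_reader):
    calibrater = MinMaxCalibrater(model_path, symmetric=False)
    calibrater.augment_graph()  # adds ReduceMin/ReduceMax graph outputs
    calibrater.create_inference_session()
    calibrater.collect_data(data_reader)  # runs the reader to exhaustion
    return calibrater.compute_data()  # TensorsData of per-tensor (min, max)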
class HistogramCalibrater(CalibraterBase):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="percentile",
symmetric=False,
num_bins=128,
num_quantized_bins=2048,
percentile=99.999,
scenario="same",
):
"""
:param model_path: ONNX model to calibrate. It is a model path.
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
        :param method: A string. One of ['entropy', 'percentile', 'distribution'].
        :param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values.
        :param num_quantized_bins: number of quantized bins. Default 2048.
        :param percentile: A float number between [0, 100]. Default 99.999.
:param scenario: see :class:`DistributionCalibrater`
"""
super().__init__(
model_path,
op_types_to_calibrate=op_types_to_calibrate,
augmented_model_path=augmented_model_path,
symmetric=symmetric,
use_external_data_format=use_external_data_format,
)
self.intermediate_outputs = []
self.calibrate_tensors_range = None
self.num_model_outputs = len(self.model.graph.output)
self.model_original_outputs = {output.name for output in self.model.graph.output}
self.collector = None
self.method = method
self.num_bins = num_bins
self.num_quantized_bins = num_quantized_bins
self.percentile = percentile
self.tensors_to_calibrate = None
self.scenario = scenario
def augment_graph(self):
"""
make all quantization_candidates op type nodes as part of the graph output.
:return: augmented ONNX model
"""
self.tensors_to_calibrate, value_infos = self.select_tensors_to_calibrate(self.model)
for tensor in self.tensors_to_calibrate:
if tensor not in self.model_original_outputs:
self.model.graph.output.append(value_infos[tensor])
onnx.save(
self.model,
self.augmented_model_path,
save_as_external_data=self.use_external_data_format,
)
def clear_collected_data(self):
self.intermediate_outputs = []
def collect_data(self, data_reader: CalibrationDataReader):
"""
Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.
"""
while True:
inputs = data_reader.get_next()
if not inputs:
break
self.intermediate_outputs.append(self.infer_session.run(None, inputs))
if len(self.intermediate_outputs) == 0:
raise ValueError("No data is collected.")
output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]
output_dicts_list = [
dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs
]
merged_dict = {}
for d in output_dicts_list:
for k, v in d.items():
merged_dict.setdefault(k, []).append(v)
clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in self.tensors_to_calibrate}
if not self.collector:
self.collector = HistogramCollector(
method=self.method,
symmetric=self.symmetric,
num_bins=self.num_bins,
num_quantized_bins=self.num_quantized_bins,
percentile=self.percentile,
scenario=self.scenario,
)
self.collector.collect(clean_merged_dict)
self.clear_collected_data()
def compute_data(self) -> TensorsData:
"""
Compute the min-max range of tensor
:return: dictionary mapping: {tensor name: (min value, max value)}
"""
if not self.collector:
raise ValueError("No collector created and can't generate calibration data.")
if isinstance(self, EntropyCalibrater):
cal = CalibrationMethod.Entropy
elif isinstance(self, PercentileCalibrater):
cal = CalibrationMethod.Percentile
elif isinstance(self, DistributionCalibrater):
cal = CalibrationMethod.Distribution
else:
raise TypeError(f"Unknown calibrater {type(self)}. This method must be overwritten.")
return TensorsData(cal, self.collector.compute_collection_result())
class EntropyCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="entropy",
symmetric=False,
num_bins=128,
num_quantized_bins=128,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param symmetric: make range of tensor symmetric (central point is 0).
:param num_bins: number of bins to create a new histogram for collecting tensor values.
:param num_quantized_bins: number of quantized bins. Default 128.
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
symmetric=symmetric,
num_bins=num_bins,
num_quantized_bins=num_quantized_bins,
)
class PercentileCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="percentile",
symmetric=False,
num_bins=2048,
percentile=99.999,
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param symmetric: make range of tensor symmetric (central point is 0).
        :param num_bins: number of bins to create a new histogram for collecting tensor values. Default 2048.
        :param percentile: A float number between [0, 100]. Default 99.999.
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
symmetric=symmetric,
num_bins=num_bins,
percentile=percentile,
)
class DistributionCalibrater(HistogramCalibrater):
def __init__(
self,
model_path: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
use_external_data_format=False,
method="distribution",
num_bins=128,
scenario="same",
):
"""
:param model_path: ONNX model to calibrate. It is a model path
:param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.
:param augmented_model_path: save augmented model to this path.
:param use_external_data_format: use external data format to store model which size is >= 2Gb
:param method: A string. One of ['entropy', 'percentile', 'distribution'].
:param num_bins: number of bins to create a new histogram for collecting tensor values.
:param scenario: for float 8 only, if `scenario="same"`,
the algorithm weights and float 8 follow the same distribution,
if `scenario="p3"`, it assumes the weights follow
a gaussian law and float 8 ~ X^3 where X is a gaussian law
"""
super().__init__(
model_path,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format,
method=method,
num_bins=num_bins,
scenario=scenario,
)
class CalibrationDataCollector(metaclass=abc.ABCMeta):
"""
Base class for collecting data for calibration-based quantization.
"""
@abc.abstractmethod
def collect(self, name_to_arr):
"""
Generate informative data based on given data.
name_to_arr : dict
tensor name to NDArray data
"""
raise NotImplementedError
@abc.abstractmethod
def compute_collection_result(self):
"""
Get the optimal result among collection data.
"""
raise NotImplementedError
class HistogramCollector(CalibrationDataCollector):
"""
Collecting histogram for each tensor. Percentile and Entropy method are supported.
    ref: https://github.com/apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/
pytorch_quantization/calib/histogram.html
"""
def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):
self.histogram_dict = {}
self.method = method
self.symmetric = symmetric
self.num_bins = num_bins
self.num_quantized_bins = num_quantized_bins
self.percentile = percentile
self.scenario = scenario
def get_histogram_dict(self):
return self.histogram_dict
def collect(self, name_to_arr):
print("Collecting tensor data and making histogram ...")
        # TODO: Currently we have different collect() implementations for the entropy and
        # percentile methods. They should be unified in the future.
if self.method in {"distribution", "entropy"}:
return self.collect_value(name_to_arr)
elif self.method == "percentile":
if self.symmetric:
return self.collect_absolute_value(name_to_arr)
else:
return self.collect_value(name_to_arr)
else:
raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
def collect_absolute_value(self, name_to_arr):
"""
Collect histogram on absolute value
"""
for tensor, data_arr in name_to_arr.items():
data_arr = np.asarray(data_arr) # noqa: PLW2901
data_arr = data_arr.flatten() # noqa: PLW2901
if data_arr.size > 0:
min_value = np.min(data_arr)
max_value = np.max(data_arr)
else:
min_value = 0
max_value = 0
data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901
if tensor not in self.histogram_dict:
# first time it uses num_bins to compute histogram.
hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)
self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)
else:
old_histogram = self.histogram_dict[tensor]
old_min = old_histogram[2]
old_max = old_histogram[3]
old_hist = old_histogram[0]
old_hist_edges = old_histogram[1]
temp_amax = np.max(data_arr)
if temp_amax > old_hist_edges[-1]:
# increase the number of bins
width = old_hist_edges[1] - old_hist_edges[0]
# NOTE: np.arange may create an extra bin after the one containing temp_amax
new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)
old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))
hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)
hist[: len(old_hist)] += old_hist
self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))
def collect_value(self, name_to_arr):
"""
Collect histogram on real value
"""
for tensor, data_arr in name_to_arr.items():
data_arr = np.asarray(data_arr) # noqa: PLW2901
data_arr = data_arr.flatten() # noqa: PLW2901
if data_arr.size > 0:
min_value = np.min(data_arr)
max_value = np.max(data_arr)
else:
min_value = 0
max_value = 0
threshold = max(abs(min_value), abs(max_value))
if tensor in self.histogram_dict:
old_histogram = self.histogram_dict[tensor]
self.histogram_dict[tensor] = self.merge_histogram(
old_histogram, data_arr, min_value, max_value, threshold
)
else:
hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))
self.histogram_dict[tensor] = (
hist,
hist_edges,
min_value,
max_value,
threshold,
)
def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):
(old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram
if new_threshold <= old_threshold:
new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))
return (
new_hist + old_hist,
old_hist_edges,
min(old_min, new_min),
max(old_max, new_max),
old_threshold,
)
else:
if old_threshold == 0:
hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))
hist += old_hist
else:
old_num_bins = len(old_hist)
old_stride = 2 * old_threshold / old_num_bins
half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)
new_num_bins = old_num_bins + 2 * half_increased_bins
new_threshold = half_increased_bins * old_stride + old_threshold
hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))
hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist
return (
hist,
hist_edges,
min(old_min, new_min),
max(old_max, new_max),
new_threshold,
)
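    # Worked example (illustrative values) of merge_histogram growing the range:
    # with 4 old bins over (-2, 2) (stride 1.0) and a new threshold of 3.5,
    # half_increased_bins = int((3.5 - 2) // 1.0 + 1) = 2, so the merged
    # histogram has 4 + 2 * 2 = 8 bins over (-4, 4) and the old counts are
    # added to the 4 central bins.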
def compute_collection_result(self):
        if not self.histogram_dict:
raise ValueError("Histogram has not been collected. Please run collect() first.")
print(f"Finding optimal threshold for each tensor using {self.method} algorithm ...")
if self.method == "entropy":
return self.compute_entropy()
elif self.method == "percentile":
return self.compute_percentile()
elif self.method == "distribution":
return self.compute_distribution()
else:
raise ValueError("Only 'entropy', 'percentile' or 'distribution' methods are supported")
def compute_percentile(self):
if self.percentile < 0 or self.percentile > 100:
raise ValueError("Invalid percentile. Must be in range 0 <= percentile <= 100.")
histogram_dict = self.histogram_dict
percentile = self.percentile
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(f"Number of histogram bins : {self.num_bins}")
print(f"Percentile : ({100.0 - percentile},{percentile})")
for tensor, histogram in histogram_dict.items():
hist = histogram[0]
hist_edges = histogram[1]
total = hist.sum()
cdf = np.cumsum(hist / total)
if self.symmetric:
idx_right = np.searchsorted(cdf, percentile / 100.0)
thresholds_dict[tensor] = (
-float(hist_edges[idx_right]),
float(hist_edges[idx_right]),
)
else:
percent_to_cut_one_side = (100.0 - percentile) / 200.0
idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)
idx_left = np.searchsorted(cdf, percent_to_cut_one_side)
thresholds_dict[tensor] = (
float(hist_edges[idx_left]),
float(hist_edges[idx_right]),
)
min_value = histogram[2]
max_value = histogram[3]
if thresholds_dict[tensor][0] < min_value:
thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])
if thresholds_dict[tensor][1] > max_value:
thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)
            thresholds_dict[tensor] = (*thresholds_dict[tensor], *histogram[:2])
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(hist, hist_edges)
return thresholds_dict
def compute_entropy(self):
histogram_dict = self.histogram_dict
num_quantized_bins = self.num_quantized_bins
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(
"Number of histogram bins : {} (The number may increase depends on the data it collects)".format(
self.num_bins
)
)
print(f"Number of quantized bins : {self.num_quantized_bins}")
for tensor, histogram in histogram_dict.items():
optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)
thresholds_dict[tensor] = optimal_threshold
thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(histogram[0], histogram[1])
return thresholds_dict
@staticmethod
def _avg_std(hist, hist_edges, power=1):
if power <= 0:
raise ValueError(f"power={power} <= 0 is invalid.")
values = (hist_edges[:-1] + hist_edges[1:]) * 0.5
if power == 1:
avg = (hist * values).sum() / hist.sum()
std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
return avg, std
if int(power) == power and int(power) % 2 == 1:
avg = (hist * values**power).sum() / hist.sum()
std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5
return avg, std
fact = np.abs(values) / values
fact[np.isnan(fact)] = 1
fact[np.isinf(fact)] = 1
values = np.abs(values) ** power * fact
avg = (hist * values).sum() / hist.sum()
std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5
return avg, std
def compute_distribution(self):
if self.num_bins < 512:
raise ValueError("Invalid num_bins. Must be in range 512 <= num_bins.")
histogram_dict = self.histogram_dict
thresholds_dict = {} # per tensor thresholds
print(f"Number of tensors : {len(histogram_dict)}")
print(f"Number of histogram bins : {self.num_bins}")
print(f"Scenario : {self.scenario!r})")
for tensor, histogram in histogram_dict.items():
hist = histogram[0]
hist_edges = histogram[1]
if self.scenario == "same":
avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)
elif self.scenario == "p3":
avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)
else:
raise ValueError("Invalid scenario. Must be in {'same', 'p3'}.")
thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)
# Plot histogram for debug only
if os.environ.get("QUANTIZATION_DEBUG", 0) in (1, "1"):
apply_plot(hist, hist_edges)
return thresholds_dict
def get_entropy_threshold(self, histogram, num_quantized_bins):
"""Given a dataset, find the optimal threshold for quantizing it.
The reference distribution is `q`, and the candidate distribution is `p`.
`q` is a truncated version of the original distribution.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
"""
import copy
from scipy.stats import entropy
hist = histogram[0]
hist_edges = histogram[1]
num_bins = hist.size
zero_bin_index = num_bins // 2
num_half_quantized_bin = num_quantized_bins // 2
kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)
thresholds = [(0, 0) for i in range(kl_divergence.size)]
# <------------ num bins ---------------->
# <--- quantized bins ---->
# |======|===========|===========|=======|
# zero bin index
# ^ ^
# | |
# start index end index (start of iteration)
# ^ ^
# | |
# start index end index ...
# ^ ^
# | |
# start index end index (end of iteration)
for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):
start_index = zero_bin_index - i
end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins
thresholds[i - num_half_quantized_bin] = (
float(hist_edges[start_index]),
float(hist_edges[end_index]),
)
sliced_distribution = copy.deepcopy(hist[start_index:end_index])
# reference distribution p
p = sliced_distribution.copy() # a copy of np array
left_outliers_count = sum(hist[:start_index])
right_outliers_count = sum(hist[end_index:])
p[0] += left_outliers_count
p[-1] += right_outliers_count
            # nonzeros[i] indicates whether p[i] is non-zero
nonzeros = (p != 0).astype(np.int64)
# quantize p.size bins into quantized bins (default 128 bins)
quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)
num_merged_bins = sliced_distribution.size // num_quantized_bins
# merge bins into quantized bins
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
quantized_bins[index] = sum(sliced_distribution[start:end])
quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])
# in order to compare p and q, we need to make length of q equals to length of p
# expand quantized bins into p.size bins
q = np.zeros(p.size, dtype=np.int64)
for index in range(num_quantized_bins):
start = index * num_merged_bins
end = start + num_merged_bins
norm = sum(nonzeros[start:end])
if norm != 0:
q[start:end] = float(quantized_bins[index]) / float(norm)
p = smooth_distribution(p)
q = smooth_distribution(q)
if isinstance(q, np.ndarray):
kl_divergence[i - num_half_quantized_bin] = entropy(p, q)
else:
kl_divergence[i - num_half_quantized_bin] = float("inf")
min_kl_divergence_idx = np.argmin(kl_divergence)
optimal_threshold = thresholds[min_kl_divergence_idx]
min_value = histogram[2]
max_value = histogram[3]
if optimal_threshold[0] < min_value:
optimal_threshold = (min_value, optimal_threshold[1])
if optimal_threshold[1] > max_value:
optimal_threshold = (optimal_threshold[0], max_value)
return optimal_threshold
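# A minimal sketch that feeds a HistogramCollector directly with synthetic
# activations and reads back per-tensor thresholds; all values are made up.
def _example_histogram_collector():
    collector = HistogramCollector(
        method="percentile",
        symmetric=True,
        num_bins=2048,
        num_quantized_bins=128,
        percentile=99.999,
        scenario="same",
    )
    collector.collect({"relu_output": [np.random.randn(1, 8).astype(np.float32)]})
    return collector.compute_collection_result()  # {tensor name: thresholds}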
def create_calibrator(
model: Union[str, Path],
op_types_to_calibrate: Optional[Sequence[str]] = None,
augmented_model_path="augmented_model.onnx",
calibrate_method=CalibrationMethod.MinMax,
use_external_data_format=False,
extra_options={}, # noqa: B006
):
calibrator = None
if calibrate_method == CalibrationMethod.MinMax:
# default settings for min-max algorithm
symmetric = False if "symmetric" not in extra_options else extra_options["symmetric"]
moving_average = False if "moving_average" not in extra_options else extra_options["moving_average"]
averaging_constant = 0.01 if "averaging_constant" not in extra_options else extra_options["averaging_constant"]
calibrator = MinMaxCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
moving_average=moving_average,
averaging_constant=averaging_constant,
)
elif calibrate_method == CalibrationMethod.Entropy:
# default settings for entropy algorithm
num_bins = 128 if "num_bins" not in extra_options else extra_options["num_bins"]
num_quantized_bins = 128 if "num_quantized_bins" not in extra_options else extra_options["num_quantized_bins"]
symmetric = False if "symmetric" not in extra_options else extra_options["symmetric"]
calibrator = EntropyCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
num_bins=num_bins,
num_quantized_bins=num_quantized_bins,
)
elif calibrate_method == CalibrationMethod.Percentile:
# default settings for percentile algorithm
num_bins = 2048 if "num_bins" not in extra_options else extra_options["num_bins"]
percentile = 99.999 if "percentile" not in extra_options else extra_options["percentile"]
symmetric = True if "symmetric" not in extra_options else extra_options["symmetric"]
calibrator = PercentileCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
symmetric=symmetric,
num_bins=num_bins,
percentile=percentile,
)
elif calibrate_method == CalibrationMethod.Distribution:
        # default settings for distribution algorithm
num_bins = 2048 if "num_bins" not in extra_options else extra_options["num_bins"]
scenario = "same" if "scenario" not in extra_options else extra_options["scenario"]
calibrator = DistributionCalibrater(
model,
op_types_to_calibrate,
augmented_model_path,
use_external_data_format=use_external_data_format,
num_bins=num_bins,
scenario=scenario,
)
if calibrator:
calibrator.augment_graph()
calibrator.create_inference_session()
return calibrator
raise ValueError(f"Unsupported calibration method {calibrate_method}")
|
normal
|
{
"blob_id": "a61132d2d504ed31d4e1e7889bde670853968559",
"index": 5739,
"step-1": "<mask token>\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. 
save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: 
CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif 
isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, 
max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. 
Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
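A minimal usage sketch for the histogram-based calibraters defined in the step above; illustrative only, not part of the record. It assumes onnxruntime is installed, and "model.onnx", the input name "input", and DummyReader are hypothetical placeholders.

# Hedged sketch: drives PercentileCalibrater end to end on random data.
import numpy as np

class DummyReader(CalibrationDataReader):  # abstract base defined in the record
    def __init__(self, input_name, shape, batches=4):
        # Pre-generate a few random float32 batches to feed the session.
        self._it = iter(np.random.rand(batches, *shape).astype(np.float32))
        self._input_name = input_name

    def get_next(self):
        x = next(self._it, None)  # returning None ends calibration
        return None if x is None else {self._input_name: x}

cal = PercentileCalibrater("model.onnx", percentile=99.99)
cal.augment_graph()              # exposes candidate tensors as graph outputs
cal.create_inference_session()   # CPU session over augmented_model.onnx
cal.collect_data(DummyReader("input", (1, 3, 224, 224)))
tensors_range = cal.compute_data()  # TensorsData(CalibrationMethod.Percentile, ...)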
"step-2": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n 
def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, 
max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. 
Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
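The same flow for the min-max path shown in this step, again as a hedged sketch; placeholders as above, and DummyReader is the hypothetical reader from the previous sketch.

minmax = MinMaxCalibrater("model.onnx", symmetric=True,
                          moving_average=True, averaging_constant=0.01)
minmax.augment_graph()            # adds ReduceMin/ReduceMax + Reshape per tensor
minmax.create_inference_session()
minmax.collect_data(DummyReader("input", (1, 3, 224, 224)))
ranges = minmax.compute_data()    # TensorsData(CalibrationMethod.MinMax, ...)
print(ranges.data)                # {tensor name: TensorData(lowest, highest)}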
"step-3": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n <mask token>\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(\n f'Only an existing tensor can be modified, {key!r} is not.')\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n 
def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, 
max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. 
Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
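For orientation between the masked variants stored above, the following is a minimal NumPy-only sketch of the histogram-merge step that `HistogramCollector.merge_histogram` implements in this record. The function name `merge_value_histograms` and the simplified return shape are illustrative, not part of the stored file, and the sketch assumes `old_threshold > 0` (the stored code also special-cases a zero threshold):

# Illustrative sketch (not part of the dataset record): merging two value
# histograms over a widened symmetric range, mirroring the non-degenerate
# branch of HistogramCollector.merge_histogram shown above.
import numpy as np

def merge_value_histograms(old_hist, old_threshold, data, new_threshold):
    # If the new data's range already fits, just re-bin it into the old layout.
    if new_threshold <= old_threshold:
        new_hist, edges = np.histogram(data, len(old_hist),
                                       range=(-old_threshold, old_threshold))
        return new_hist + old_hist, edges, old_threshold
    # Otherwise grow the range by whole old-stride bins on both sides, so the
    # old counts can be added back into the middle without re-binning them.
    stride = 2 * old_threshold / len(old_hist)          # assumes old_threshold > 0
    extra = int((new_threshold - old_threshold) // stride + 1)
    grown_threshold = extra * stride + old_threshold
    n_bins = len(old_hist) + 2 * extra
    hist, edges = np.histogram(data, n_bins,
                               range=(-grown_threshold, grown_threshold))
    hist[extra:n_bins - extra] += old_hist
    return hist, edges, grown_threshold

old_hist, _ = np.histogram(np.random.randn(1000), 128, range=(-3.0, 3.0))
merged, edges, thr = merge_value_histograms(old_hist, 3.0,
                                            5.0 * np.random.randn(1000), 15.0)

Growing the range in whole old-stride increments is the design choice that keeps the old bin edges aligned with a contiguous slice of the new ones, which is why the stored code can accumulate `old_hist` with a single slice-add.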
"step-4": "<mask token>\n\n\nclass TensorsData:\n\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData,\n Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f'Keys must be strings not {type(k)}.')\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v\n ) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1],\n histogram=v[2], bins=v[3])\n continue\n raise TypeError(\n f'Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.'\n )\n if not isinstance(v, TensorData):\n raise TypeError(f'Values must be TensorData not {type(v)}.')\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n\n def __contains__(self, key):\n return key in self.data\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(\n f'Only an existing tensor can be modified, {key!r} is not.')\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, 'get_next') and callable(subclass.get_next\n ) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) ->dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=False\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError('model_path should be model path.')\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = ['CPUExecutionProvider']\n\n def set_execution_providers(self, execution_providers=[\n 'CPUExecutionProvider']):\n \"\"\"\n reset the execution providers to execute the collect_data. 
It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = (onnxruntime.\n GraphOptimizationLevel.ORT_DISABLE_ALL)\n self.infer_session = onnxruntime.InferenceSession(self.\n augmented_model_path, sess_options=sess_options, providers=self\n .execution_providers)\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n for node in model.graph.node:\n if (not self.op_types_to_calibrate or node.op_type in self.\n op_types_to_calibrate):\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (vi.type.HasField('tensor_type') and vi.type.\n tensor_type.elem_type in\n tensor_type_to_calibrate and tensor_name not in\n initializer):\n tensors_to_calibrate.add(tensor_name)\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', symmetric=False, use_external_data_format=\n False, moving_average=False, averaging_constant=0.01):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. 
By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1\n ):\n raise ValueError(\n 'Invalid averaging constant, which should not be < 0 or > 1.')\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.\n int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n keepdims = 1\n reduce_output = tensor_name + '_' + reduce_op_name\n intermediate_output = reduce_output + '_Reshape'\n reduce_node = onnx.helper.make_node(reduce_op_name, [\n tensor_name], [intermediate_output], keepdims=keepdims,\n name=reduce_output)\n reshape_node = onnx.helper.make_node('Reshape', inputs=[\n intermediate_output, reshape_shape_name], outputs=[\n reduce_output], name=intermediate_output)\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(\n reduce_output, TensorProto.FLOAT, [1]))\n for tensor in tensors:\n add_reduce_min_max(tensor, 'ReduceMin')\n add_reduce_min_max(tensor, 'ReduceMax')\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(\n f'compute_data must return a TensorsData not {type(t)}.')\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range\n [key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range\n [key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = min_value, max_value\n return new_range\n\n 
def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs:]\n calibrate_tensor_names = [added_output_names[i].rpartition('_')[0] for\n i in range(0, len(added_output_names), 2)]\n merged_added_output_dict = {i: merged_output_dict[i] for i in\n merged_output_dict if i not in self.model_original_outputs}\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[\n added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[\n added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[\n added_output_names[i]])\n max_value_array = max(merged_added_output_dict[\n added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax,\n dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.\n calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=128, num_quantized_bins=\n 2048, percentile=99.999, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. 
Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate=\n op_types_to_calibrate, augmented_model_path=\n augmented_model_path, symmetric=symmetric,\n use_external_data_format=use_external_data_format)\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model\n .graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = (self.\n select_tensors_to_calibrate(self.model))\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n onnx.save(self.model, self.augmented_model_path,\n save_as_external_data=self.use_external_data_format)\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None,\n inputs))\n if len(self.intermediate_outputs) == 0:\n raise ValueError('No data is collected.')\n output_names = [self.infer_session.get_outputs()[i].name for i in\n range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [dict(zip(output_names, intermediate_output)) for\n intermediate_output in self.intermediate_outputs]\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in\n self.tensors_to_calibrate}\n if not self.collector:\n self.collector = HistogramCollector(method=self.method,\n symmetric=self.symmetric, num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins, percentile=self\n .percentile, scenario=self.scenario)\n self.collector.collect(clean_merged_dict)\n self.clear_collected_data()\n\n def compute_data(self) ->TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\n \"No collector created and can't generate calibration data.\")\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(\n f'Unknown calibrater {type(self)}. This method must be overwritten.'\n )\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'entropy', symmetric=False, num_bins=128, num_quantized_bins=128):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, num_quantized_bins=\n num_quantized_bins)\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'percentile', symmetric=False, num_bins=2048, percentile=99.999):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n symmetric=symmetric, num_bins=num_bins, percentile=percentile)\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n\n def __init__(self, model_path: Union[str, Path], op_types_to_calibrate:\n Optional[Sequence[str]]=None, augmented_model_path=\n 'augmented_model.onnx', use_external_data_format=False, method=\n 'distribution', num_bins=128, scenario='same'):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(model_path, op_types_to_calibrate,\n augmented_model_path, use_external_data_format, method=method,\n num_bins=num_bins, scenario=scenario)\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins,\n percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print('Collecting tensor data and making histogram ...')\n if self.method in {'distribution', 'entropy'}:\n return self.collect_value(name_to_arr)\n elif self.method == 'percentile':\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n data_arr = np.absolute(data_arr)\n if tensor not in self.histogram_dict:\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n width = old_hist_edges[1] - old_hist_edges[0]\n new_bin_edges = np.arange(old_hist_edges[-1] + width, \n temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[:len(old_hist)] += old_hist\n self.histogram_dict[tensor] = hist, hist_edges, min(old_min,\n min_value), max(old_max, 
max_value)\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr)\n data_arr = data_arr.flatten()\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n threshold = max(abs(min_value), abs(max_value))\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold)\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins,\n range=(-threshold, threshold))\n self.histogram_dict[tensor\n ] = hist, hist_edges, min_value, max_value, threshold\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max,\n new_threshold):\n old_hist, old_hist_edges, old_min, old_max, old_threshold = (\n old_histogram)\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-\n old_threshold, old_threshold))\n return new_hist + old_hist, old_hist_edges, min(old_min, new_min\n ), max(old_max, new_max), old_threshold\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist),\n range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) //\n old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = (half_increased_bins * old_stride +\n old_threshold)\n hist, hist_edges = np.histogram(data_arr, new_num_bins,\n range=(-new_threshold, new_threshold))\n hist[half_increased_bins:new_num_bins - half_increased_bins\n ] += old_hist\n return hist, hist_edges, min(old_min, new_min), max(old_max,\n new_max), new_threshold\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\n 'Histogram has not been collected. Please run collect() first.'\n )\n print(\n f'Finding optimal threshold for each tensor using {self.method} algorithm ...'\n )\n if self.method == 'entropy':\n return self.compute_entropy()\n elif self.method == 'percentile':\n return self.compute_percentile()\n elif self.method == 'distribution':\n return self.compute_distribution()\n else:\n raise ValueError(\n \"Only 'entropy', 'percentile' or 'distribution' methods are supported\"\n )\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\n 'Invalid percentile. 
Must be in range 0 <= percentile <= 100.')\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Percentile : ({100.0 - percentile},{percentile})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n thresholds_dict[tensor] = -float(hist_edges[idx_right]), float(\n hist_edges[idx_right])\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = float(hist_edges[idx_left]), float(\n hist_edges[idx_right])\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = min_value, thresholds_dict[tensor][1]\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = thresholds_dict[tensor][0], max_value\n thresholds_dict[tensor] = *thresholds_dict[tensor], *hist[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(\n 'Number of histogram bins : {} (The number may increase depends on the data it collects)'\n .format(self.num_bins))\n print(f'Number of quantized bins : {self.num_quantized_bins}')\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram,\n num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = *optimal_threshold, *histogram[:2]\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(histogram[0], histogram[1])\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f'power={power} <= 0 is invalid.')\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values ** power).sum() / hist.sum()\n std = ((hist * (values ** power - avg) ** 2).sum() / hist.sum()\n ) ** 0.5\n return avg, std\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values ** 2).sum() / hist.sum() - avg ** 2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\n 'Invalid num_bins. 
Must be in range 512 <= num_bins.')\n histogram_dict = self.histogram_dict\n thresholds_dict = {}\n print(f'Number of tensors : {len(histogram_dict)}')\n print(f'Number of histogram bins : {self.num_bins}')\n print(f'Scenario : {self.scenario!r})')\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n if self.scenario == 'same':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == 'p3':\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=\n 1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\"\n )\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef,\n hist=hist, hist_edges=hist_edges)\n if os.environ.get('QUANTIZATION_DEBUG', 0) in (1, '1'):\n apply_plot(hist, hist_edges)\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n from scipy.stats import entropy\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = (zero_bin_index + i + 1 if zero_bin_index + i + 1 <=\n num_bins else num_bins)\n thresholds[i - num_half_quantized_bin] = float(hist_edges[\n start_index]), float(hist_edges[end_index])\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n p = sliced_distribution.copy()\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n nonzeros = (p != 0).astype(np.int64)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[\n num_quantized_bins * num_merged_bins:])\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / float(norm)\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float('inf')\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = min_value, optimal_threshold[1]\n if optimal_threshold[1] > max_value:\n optimal_threshold = optimal_threshold[0], max_value\n return optimal_threshold\n\n\n<mask token>\n",
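Before the fully unmasked step-5 listing below, a small NumPy-only sketch of the percentile clipping that `compute_percentile` performs on a collected histogram. The variable names here are illustrative; the 2048-bin and 99.999 defaults match the `PercentileCalibrater` constructor shown in the record:

# Illustrative sketch (not part of the dataset record): choosing symmetric and
# asymmetric clipping thresholds from a histogram CDF, as compute_percentile does.
import numpy as np

data = np.random.randn(100_000)
hist, edges = np.histogram(data, 2048, range=(-6.0, 6.0))
cdf = np.cumsum(hist / hist.sum())
percentile = 99.999

# Symmetric: a single right-side cut, mirrored around zero.
idx = np.searchsorted(cdf, percentile / 100.0)
sym_range = (-float(edges[idx]), float(edges[idx]))

# Asymmetric: split the excluded probability mass between both tails.
tail = (100.0 - percentile) / 200.0
lo = float(edges[np.searchsorted(cdf, tail)])
hi = float(edges[np.searchsorted(cdf, 1.0 - tail)])
asym_range = (lo, hi)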
"step-5": "#!/usr/bin/env python\n# -------------------------------------------------------------------------\n# Copyright (c) Microsoft, Intel Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\nimport abc\nimport itertools\nimport os\nimport uuid\nfrom enum import Enum\nfrom pathlib import Path\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport onnx\nfrom onnx import ModelProto, TensorProto, helper, numpy_helper\n\nimport onnxruntime\n\nfrom .quant_utils import apply_plot, load_model_with_shape_infer, smooth_distribution\n\n\nclass TensorData:\n _allowed = frozenset([\"avg\", \"std\", \"lowest\", \"highest\", \"hist\", \"hist_edges\"])\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n if k not in TensorData._allowed:\n raise ValueError(f\"Unexpected value {k!r} not in {TensorData._allowed}.\")\n setattr(self, k, v)\n\n @property\n def range_value(self):\n if not hasattr(self, \"lowest\") or not hasattr(self, \"highest\"):\n raise AttributeError(f\"Attributes 'lowest' and/or 'highest' missing in {dir(self)}.\")\n return (self.lowest, self.highest)\n\n @property\n def avg_std(self):\n if not hasattr(self, \"avg\") or not hasattr(self, \"std\"):\n raise AttributeError(f\"Attributes 'avg' and/or 'std' missing in {dir(self)}.\")\n return (self.avg, self.std)\n\n\nclass TensorsData:\n def __init__(self, calibration_method, data: Dict[str, Union[TensorData, Tuple]]):\n self.calibration_method = calibration_method\n self.data = {}\n for k, v in data.items():\n if not isinstance(k, str):\n raise TypeError(f\"Keys must be strings not {type(k)}.\")\n if isinstance(v, tuple):\n if calibration_method == CalibrationMethod.MinMax and len(v) == 2:\n self.data[k] = TensorData(lowest=v[0], highest=v[1])\n continue\n if len(v) == 4:\n self.data[k] = TensorData(lowest=v[0], highest=v[1], histogram=v[2], bins=v[3])\n continue\n raise TypeError(f\"Unexpected tuple for {k:r}, it has {len(v)} elements: {v}.\")\n if not isinstance(v, TensorData):\n raise TypeError(f\"Values must be TensorData not {type(v)}.\")\n self.data[k] = v\n\n def __iter__(self):\n yield from self.data\n\n def __contains__(self, key):\n return key in self.data\n\n def __getitem__(self, key):\n return self.data[key]\n\n def __setitem__(self, key, value):\n if key not in self.data:\n raise RuntimeError(f\"Only an existing tensor can be modified, {key!r} is not.\")\n self.data[key] = value\n\n def values(self):\n return self.data.values()\n\n\nclass CalibrationMethod(Enum):\n MinMax = 0\n Entropy = 1\n Percentile = 2\n Distribution = 3\n\n\nclass CalibrationDataReader(metaclass=abc.ABCMeta):\n @classmethod\n def __subclasshook__(cls, subclass):\n return hasattr(subclass, \"get_next\") and callable(subclass.get_next) or NotImplemented\n\n @abc.abstractmethod\n def get_next(self) -> dict:\n \"\"\"generate the input data dict for ONNXinferenceSession run\"\"\"\n raise NotImplementedError\n\n def __iter__(self):\n return self\n\n def __next__(self):\n result = self.get_next()\n if result is None:\n raise StopIteration\n return result\n\n\nclass CalibraterBase:\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n symmetric=False,\n use_external_data_format=False,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. 
It should be a model file path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n \"\"\"\n if isinstance(model_path, str):\n self.model = load_model_with_shape_infer(Path(model_path))\n elif isinstance(model_path, Path):\n self.model = load_model_with_shape_infer(model_path)\n else:\n raise ValueError(\"model_path should be model path.\")\n\n self.op_types_to_calibrate = op_types_to_calibrate\n self.augmented_model_path = augmented_model_path\n self.symmetric = symmetric\n self.use_external_data_format = use_external_data_format\n\n self.augment_model = None\n self.infer_session = None\n self.execution_providers = [\"CPUExecutionProvider\"]\n\n def set_execution_providers(self, execution_providers=[\"CPUExecutionProvider\"]): # noqa: B006\n \"\"\"\n reset the execution providers to execute the collect_data. It triggers to re-creating inference session.\n \"\"\"\n self.execution_providers = execution_providers\n self.create_inference_session()\n\n def create_inference_session(self):\n \"\"\"\n create an OnnxRuntime InferenceSession.\n \"\"\"\n sess_options = onnxruntime.SessionOptions()\n sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL\n self.infer_session = onnxruntime.InferenceSession(\n self.augmented_model_path,\n sess_options=sess_options,\n providers=self.execution_providers,\n )\n\n def select_tensors_to_calibrate(self, model: ModelProto):\n \"\"\"\n select input/output tensors of candidate nodes to calibrate.\n returns:\n tensors (set): set of tensor name.\n value_infos (dict): tensor name to value info.\n \"\"\"\n value_infos = {vi.name: vi for vi in model.graph.value_info}\n value_infos.update({ot.name: ot for ot in model.graph.output})\n value_infos.update({it.name: it for it in model.graph.input})\n initializer = {init.name for init in model.graph.initializer}\n\n tensors_to_calibrate = set()\n tensor_type_to_calibrate = {TensorProto.FLOAT}\n\n for node in model.graph.node:\n if not self.op_types_to_calibrate or node.op_type in self.op_types_to_calibrate:\n for tensor_name in itertools.chain(node.input, node.output):\n if tensor_name in value_infos:\n vi = value_infos[tensor_name]\n if (\n vi.type.HasField(\"tensor_type\")\n and (vi.type.tensor_type.elem_type in tensor_type_to_calibrate)\n and (tensor_name not in initializer)\n ):\n tensors_to_calibrate.add(tensor_name)\n\n return tensors_to_calibrate, value_infos\n\n def get_augment_model(self):\n \"\"\"\n return: augmented onnx model. Call after calling augment_graph\n \"\"\"\n return self.model\n\n def augment_graph(self):\n \"\"\"\n abstract method: augment the input model to prepare for collecting data. It will:\n 1. augment the model to be able to collect desired statistics data\n 2. save augmented model to augmented_model_paths\n \"\"\"\n raise NotImplementedError\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n abstract method: collect the tensors that will be used for range computation. 
It can be called multiple times.\n \"\"\"\n raise NotImplementedError\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n abstract method: compute data based on the calibration method stored in TensorsData\n \"\"\"\n raise NotImplementedError\n\n\nclass MinMaxCalibrater(CalibraterBase):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n symmetric=False,\n use_external_data_format=False,\n moving_average=False,\n averaging_constant=0.01,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param moving_average: compute the moving average of the minimum and maximum values instead of the global minimum and maximum.\n :param averaging_constant: constant smoothing factor to use when computing the moving average.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate=op_types_to_calibrate,\n augmented_model_path=augmented_model_path,\n symmetric=symmetric,\n use_external_data_format=use_external_data_format,\n )\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model.graph.output}\n self.moving_average = moving_average\n if moving_average and (averaging_constant < 0 or averaging_constant > 1):\n raise ValueError(\"Invalid averaging constant, which should not be < 0 or > 1.\")\n self.averaging_constant = averaging_constant\n\n def augment_graph(self):\n \"\"\"\n Adds ReduceMin and ReduceMax nodes to all quantization_candidates op type nodes in\n model and ensures their outputs are stored as part of the graph output\n :return: augmented ONNX model\n \"\"\"\n tensors, _ = self.select_tensors_to_calibrate(self.model)\n reshape_shape_name = str(uuid.uuid4())\n reshape_shape = numpy_helper.from_array(np.array([1], dtype=np.int64), reshape_shape_name)\n self.model.graph.initializer.append(reshape_shape)\n\n def add_reduce_min_max(tensor_name, reduce_op_name):\n # When doing ReduceMax/ReduceMin, ORT can't reduce on dim with value of 0 if 'keepdims' is false.\n # To make the code simple, we always let keepdims to be 1.\n keepdims = 1\n\n # Adding ReduceMin/ReduceMax nodes: ReduceMin/ReduceMax -> Reshape-> (output)\n reduce_output = tensor_name + \"_\" + reduce_op_name\n intermediate_output = reduce_output + \"_Reshape\"\n reduce_node = onnx.helper.make_node(\n reduce_op_name, [tensor_name], [intermediate_output], keepdims=keepdims, name=reduce_output\n )\n\n reshape_node = onnx.helper.make_node(\n \"Reshape\",\n inputs=[intermediate_output, reshape_shape_name],\n outputs=[reduce_output],\n name=intermediate_output,\n )\n\n self.model.graph.node.extend([reduce_node, reshape_node])\n self.model.graph.output.append(helper.make_tensor_value_info(reduce_output, TensorProto.FLOAT, [1]))\n\n for tensor in tensors:\n add_reduce_min_max(tensor, \"ReduceMin\")\n add_reduce_min_max(tensor, \"ReduceMax\")\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )\n\n def clear_collected_data(self):\n 
self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None, inputs))\n\n if len(self.intermediate_outputs) == 0:\n raise ValueError(\"No data is collected.\")\n\n t = self.compute_data()\n if not isinstance(t, TensorsData):\n raise TypeError(f\"compute_data must return a TensorsData not {type(t)}.\")\n self.clear_collected_data()\n\n def merge_range(self, old_range, new_range):\n if not old_range:\n return new_range\n\n for key, value in old_range.items():\n if self.moving_average:\n min_value = value[0] + self.averaging_constant * (new_range[key][0] - value[0])\n max_value = value[1] + self.averaging_constant * (new_range[key][1] - value[1])\n else:\n min_value = min(value[0], new_range[key][0])\n max_value = max(value[1], new_range[key][1])\n new_range[key] = (min_value, max_value)\n\n return new_range\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {added node names: (ReduceMin, ReduceMax) pairs }\n \"\"\"\n\n if len(self.intermediate_outputs) == 0:\n return self.calibrate_tensors_range\n\n output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [\n dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs\n ]\n\n merged_output_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_output_dict.setdefault(k, []).append(v)\n added_output_names = output_names[self.num_model_outputs :]\n calibrate_tensor_names = [\n added_output_names[i].rpartition(\"_\")[0] for i in range(0, len(added_output_names), 2)\n ] # output names\n\n merged_added_output_dict = {\n i: merged_output_dict[i] for i in merged_output_dict if i not in self.model_original_outputs\n }\n\n pairs = []\n for i in range(0, len(added_output_names), 2):\n min_value = 0\n max_value = 0\n if self.moving_average:\n min_value_array = np.mean(merged_added_output_dict[added_output_names[i]], axis=0)\n max_value_array = np.mean(merged_added_output_dict[added_output_names[i + 1]], axis=0)\n else:\n min_value_array = min(merged_added_output_dict[added_output_names[i]])\n max_value_array = max(merged_added_output_dict[added_output_names[i + 1]])\n if type(min_value_array) == int or min_value_array.size > 0:\n min_value = float(min_value_array)\n if type(max_value_array) == int or max_value_array.size > 0:\n max_value = float(max_value_array)\n\n if self.symmetric:\n max_absolute_value = max(abs(min_value), abs(max_value))\n pairs.append(tuple([-max_absolute_value, max_absolute_value]))\n else:\n pairs.append(tuple([min_value, max_value]))\n\n new_calibrate_tensors_range = TensorsData(CalibrationMethod.MinMax, dict(zip(calibrate_tensor_names, pairs)))\n if self.calibrate_tensors_range:\n self.calibrate_tensors_range = self.merge_range(self.calibrate_tensors_range, new_calibrate_tensors_range)\n else:\n self.calibrate_tensors_range = new_calibrate_tensors_range\n\n return self.calibrate_tensors_range\n\n\nclass HistogramCalibrater(CalibraterBase):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"percentile\",\n symmetric=False,\n num_bins=128,\n num_quantized_bins=2048,\n percentile=99.999,\n scenario=\"same\",\n 
):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path.\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n :param scenario: see :class:`DistributionCalibrater`\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate=op_types_to_calibrate,\n augmented_model_path=augmented_model_path,\n symmetric=symmetric,\n use_external_data_format=use_external_data_format,\n )\n self.intermediate_outputs = []\n self.calibrate_tensors_range = None\n self.num_model_outputs = len(self.model.graph.output)\n self.model_original_outputs = {output.name for output in self.model.graph.output}\n self.collector = None\n self.method = method\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.tensors_to_calibrate = None\n self.scenario = scenario\n\n def augment_graph(self):\n \"\"\"\n make all quantization_candidates op type nodes as part of the graph output.\n :return: augmented ONNX model\n \"\"\"\n self.tensors_to_calibrate, value_infos = self.select_tensors_to_calibrate(self.model)\n for tensor in self.tensors_to_calibrate:\n if tensor not in self.model_original_outputs:\n self.model.graph.output.append(value_infos[tensor])\n\n onnx.save(\n self.model,\n self.augmented_model_path,\n save_as_external_data=self.use_external_data_format,\n )\n\n def clear_collected_data(self):\n self.intermediate_outputs = []\n\n def collect_data(self, data_reader: CalibrationDataReader):\n \"\"\"\n Entropy Calibrator collects operators' tensors as well as generates tensor histogram for each operator.\n \"\"\"\n while True:\n inputs = data_reader.get_next()\n if not inputs:\n break\n self.intermediate_outputs.append(self.infer_session.run(None, inputs))\n\n if len(self.intermediate_outputs) == 0:\n raise ValueError(\"No data is collected.\")\n\n output_names = [self.infer_session.get_outputs()[i].name for i in range(len(self.intermediate_outputs[0]))]\n output_dicts_list = [\n dict(zip(output_names, intermediate_output)) for intermediate_output in self.intermediate_outputs\n ]\n\n merged_dict = {}\n for d in output_dicts_list:\n for k, v in d.items():\n merged_dict.setdefault(k, []).append(v)\n\n clean_merged_dict = {i: merged_dict[i] for i in merged_dict if i in self.tensors_to_calibrate}\n\n if not self.collector:\n self.collector = HistogramCollector(\n method=self.method,\n symmetric=self.symmetric,\n num_bins=self.num_bins,\n num_quantized_bins=self.num_quantized_bins,\n percentile=self.percentile,\n scenario=self.scenario,\n )\n self.collector.collect(clean_merged_dict)\n\n self.clear_collected_data()\n\n def compute_data(self) -> TensorsData:\n \"\"\"\n Compute the min-max range of tensor\n :return: dictionary mapping: {tensor name: (min value, max value)}\n \"\"\"\n if not self.collector:\n raise ValueError(\"No collector created and can't generate calibration data.\")\n\n if isinstance(self, EntropyCalibrater):\n cal = CalibrationMethod.Entropy\n elif 
isinstance(self, PercentileCalibrater):\n cal = CalibrationMethod.Percentile\n elif isinstance(self, DistributionCalibrater):\n cal = CalibrationMethod.Distribution\n else:\n raise TypeError(f\"Unknown calibrater {type(self)}. This method must be overwritten.\")\n return TensorsData(cal, self.collector.compute_collection_result())\n\n\nclass EntropyCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"entropy\",\n symmetric=False,\n num_bins=128,\n num_quantized_bins=128,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param num_quantized_bins: number of quantized bins. Default 128.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n symmetric=symmetric,\n num_bins=num_bins,\n num_quantized_bins=num_quantized_bins,\n )\n\n\nclass PercentileCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"percentile\",\n symmetric=False,\n num_bins=2048,\n percentile=99.999,\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_quantized_bins: number of quantized bins. Default 128.\n :param percentile: A float number between [0, 100]. Default 99.99.\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n symmetric=symmetric,\n num_bins=num_bins,\n percentile=percentile,\n )\n\n\nclass DistributionCalibrater(HistogramCalibrater):\n def __init__(\n self,\n model_path: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n use_external_data_format=False,\n method=\"distribution\",\n num_bins=128,\n scenario=\"same\",\n ):\n \"\"\"\n :param model_path: ONNX model to calibrate. It is a model path\n :param op_types_to_calibrate: operator types to calibrate. By default, calibrate all the float32/float16 tensors.\n :param augmented_model_path: save augmented model to this path.\n :param use_external_data_format: use external data format to store model which size is >= 2Gb\n :param method: A string. 
One of ['entropy', 'percentile', 'distribution'].\n :param symmetric: make range of tensor symmetric (central point is 0).\n :param num_bins: number of bins to create a new histogram for collecting tensor values.\n :param scenario: for float 8 only, if `scenario=\"same\"`,\n the algorithm weights and float 8 follow the same distribution,\n if `scenario=\"p3\"`, it assumes the weights follow\n a gaussian law and float 8 ~ X^3 where X is a gaussian law\n \"\"\"\n super().__init__(\n model_path,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format,\n method=method,\n num_bins=num_bins,\n scenario=scenario,\n )\n\n\nclass CalibrationDataCollector(metaclass=abc.ABCMeta):\n \"\"\"\n Base class for collecting data for calibration-based quantization.\n \"\"\"\n\n @abc.abstractmethod\n def collect(self, name_to_arr):\n \"\"\"\n Generate informative data based on given data.\n name_to_arr : dict\n tensor name to NDArray data\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def compute_collection_result(self):\n \"\"\"\n Get the optimal result among collection data.\n \"\"\"\n raise NotImplementedError\n\n\nclass HistogramCollector(CalibrationDataCollector):\n \"\"\"\n Collecting histogram for each tensor. Percentile and Entropy method are supported.\n\n ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py\n ref: https://docs.nvidia.com/deeplearning/tensorrt/pytorch-quantization-toolkit/docs/_modules/\n pytorch_quantization/calib/histogram.html\n \"\"\"\n\n def __init__(self, method, symmetric, num_bins, num_quantized_bins, percentile, scenario):\n self.histogram_dict = {}\n self.method = method\n self.symmetric = symmetric\n self.num_bins = num_bins\n self.num_quantized_bins = num_quantized_bins\n self.percentile = percentile\n self.scenario = scenario\n\n def get_histogram_dict(self):\n return self.histogram_dict\n\n def collect(self, name_to_arr):\n print(\"Collecting tensor data and making histogram ...\")\n\n # TODO: Currently we have different collect() for entropy and percentile method respectively.\n # Need unified collect in the future.\n if self.method in {\"distribution\", \"entropy\"}:\n return self.collect_value(name_to_arr)\n elif self.method == \"percentile\":\n if self.symmetric:\n return self.collect_absolute_value(name_to_arr)\n else:\n return self.collect_value(name_to_arr)\n else:\n raise ValueError(\"Only 'entropy', 'percentile' or 'distribution' methods are supported\")\n\n def collect_absolute_value(self, name_to_arr):\n \"\"\"\n Collect histogram on absolute value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n data_arr = np.absolute(data_arr) # only consider absolute value # noqa: PLW2901\n\n if tensor not in self.histogram_dict:\n # first time it uses num_bins to compute histogram.\n hist, hist_edges = np.histogram(data_arr, bins=self.num_bins)\n self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value)\n else:\n old_histogram = self.histogram_dict[tensor]\n old_min = old_histogram[2]\n old_max = old_histogram[3]\n old_hist = old_histogram[0]\n old_hist_edges = old_histogram[1]\n temp_amax = np.max(data_arr)\n if temp_amax > old_hist_edges[-1]:\n # increase the number of bins\n width = old_hist_edges[1] - old_hist_edges[0]\n # NOTE: np.arange may 
create an extra bin after the one containing temp_amax\n new_bin_edges = np.arange(old_hist_edges[-1] + width, temp_amax + width, width)\n old_hist_edges = np.hstack((old_hist_edges, new_bin_edges))\n hist, hist_edges = np.histogram(data_arr, bins=old_hist_edges)\n hist[: len(old_hist)] += old_hist\n self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value))\n\n def collect_value(self, name_to_arr):\n \"\"\"\n Collect histogram on real value\n \"\"\"\n for tensor, data_arr in name_to_arr.items():\n data_arr = np.asarray(data_arr) # noqa: PLW2901\n data_arr = data_arr.flatten() # noqa: PLW2901\n\n if data_arr.size > 0:\n min_value = np.min(data_arr)\n max_value = np.max(data_arr)\n else:\n min_value = 0\n max_value = 0\n\n threshold = max(abs(min_value), abs(max_value))\n\n if tensor in self.histogram_dict:\n old_histogram = self.histogram_dict[tensor]\n self.histogram_dict[tensor] = self.merge_histogram(\n old_histogram, data_arr, min_value, max_value, threshold\n )\n else:\n hist, hist_edges = np.histogram(data_arr, self.num_bins, range=(-threshold, threshold))\n self.histogram_dict[tensor] = (\n hist,\n hist_edges,\n min_value,\n max_value,\n threshold,\n )\n\n def merge_histogram(self, old_histogram, data_arr, new_min, new_max, new_threshold):\n (old_hist, old_hist_edges, old_min, old_max, old_threshold) = old_histogram\n\n if new_threshold <= old_threshold:\n new_hist, _ = np.histogram(data_arr, len(old_hist), range=(-old_threshold, old_threshold))\n return (\n new_hist + old_hist,\n old_hist_edges,\n min(old_min, new_min),\n max(old_max, new_max),\n old_threshold,\n )\n else:\n if old_threshold == 0:\n hist, hist_edges = np.histogram(data_arr, len(old_hist), range=(-new_threshold, new_threshold))\n hist += old_hist\n else:\n old_num_bins = len(old_hist)\n old_stride = 2 * old_threshold / old_num_bins\n half_increased_bins = int((new_threshold - old_threshold) // old_stride + 1)\n new_num_bins = old_num_bins + 2 * half_increased_bins\n new_threshold = half_increased_bins * old_stride + old_threshold\n hist, hist_edges = np.histogram(data_arr, new_num_bins, range=(-new_threshold, new_threshold))\n hist[half_increased_bins : new_num_bins - half_increased_bins] += old_hist\n return (\n hist,\n hist_edges,\n min(old_min, new_min),\n max(old_max, new_max),\n new_threshold,\n )\n\n def compute_collection_result(self):\n if not self.histogram_dict or len(self.histogram_dict) == 0:\n raise ValueError(\"Histogram has not been collected. Please run collect() first.\")\n print(f\"Finding optimal threshold for each tensor using {self.method} algorithm ...\")\n\n if self.method == \"entropy\":\n return self.compute_entropy()\n elif self.method == \"percentile\":\n return self.compute_percentile()\n elif self.method == \"distribution\":\n return self.compute_distribution()\n else:\n raise ValueError(\"Only 'entropy', 'percentile' or 'distribution' methods are supported\")\n\n def compute_percentile(self):\n if self.percentile < 0 or self.percentile > 100:\n raise ValueError(\"Invalid percentile. 
Must be in range 0 <= percentile <= 100.\")\n\n histogram_dict = self.histogram_dict\n percentile = self.percentile\n\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(f\"Number of histogram bins : {self.num_bins}\")\n print(f\"Percentile : ({100.0 - percentile},{percentile})\")\n\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n total = hist.sum()\n cdf = np.cumsum(hist / total)\n if self.symmetric:\n idx_right = np.searchsorted(cdf, percentile / 100.0)\n\n thresholds_dict[tensor] = (\n -float(hist_edges[idx_right]),\n float(hist_edges[idx_right]),\n )\n else:\n percent_to_cut_one_side = (100.0 - percentile) / 200.0\n idx_right = np.searchsorted(cdf, 1.0 - percent_to_cut_one_side)\n idx_left = np.searchsorted(cdf, percent_to_cut_one_side)\n thresholds_dict[tensor] = (\n float(hist_edges[idx_left]),\n float(hist_edges[idx_right]),\n )\n min_value = histogram[2]\n max_value = histogram[3]\n if thresholds_dict[tensor][0] < min_value:\n thresholds_dict[tensor] = (min_value, thresholds_dict[tensor][1])\n if thresholds_dict[tensor][1] > max_value:\n thresholds_dict[tensor] = (thresholds_dict[tensor][0], max_value)\n thresholds_dict[tensor] = (*thresholds_dict[tensor], *hist[:2])\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(hist, hist_edges)\n\n return thresholds_dict\n\n def compute_entropy(self):\n histogram_dict = self.histogram_dict\n num_quantized_bins = self.num_quantized_bins\n\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(\n \"Number of histogram bins : {} (The number may increase depends on the data it collects)\".format(\n self.num_bins\n )\n )\n print(f\"Number of quantized bins : {self.num_quantized_bins}\")\n\n for tensor, histogram in histogram_dict.items():\n optimal_threshold = self.get_entropy_threshold(histogram, num_quantized_bins)\n thresholds_dict[tensor] = optimal_threshold\n thresholds_dict[tensor] = (*optimal_threshold, *histogram[:2])\n\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(histogram[0], histogram[1])\n\n return thresholds_dict\n\n @staticmethod\n def _avg_std(hist, hist_edges, power=1):\n if power <= 0:\n raise ValueError(f\"power={power} <= 0 is invalid.\")\n values = (hist_edges[:-1] + hist_edges[1:]) * 0.5\n if power == 1:\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5\n return avg, std\n if int(power) == power and int(power) % 2 == 1:\n avg = (hist * values**power).sum() / hist.sum()\n std = ((hist * (values**power - avg) ** 2).sum() / hist.sum()) ** 0.5\n return avg, std\n\n fact = np.abs(values) / values\n fact[np.isnan(fact)] = 1\n fact[np.isinf(fact)] = 1\n values = np.abs(values) ** power * fact\n avg = (hist * values).sum() / hist.sum()\n std = ((hist * values**2).sum() / hist.sum() - avg**2) ** 0.5\n return avg, std\n\n def compute_distribution(self):\n if self.num_bins < 512:\n raise ValueError(\"Invalid num_bins. 
Must be in range 512 <= num_bins.\")\n\n histogram_dict = self.histogram_dict\n thresholds_dict = {} # per tensor thresholds\n\n print(f\"Number of tensors : {len(histogram_dict)}\")\n print(f\"Number of histogram bins : {self.num_bins}\")\n print(f\"Scenario : {self.scenario!r})\")\n\n for tensor, histogram in histogram_dict.items():\n hist = histogram[0]\n hist_edges = histogram[1]\n\n if self.scenario == \"same\":\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1)\n elif self.scenario == \"p3\":\n avg_coef, std_coef = self._avg_std(hist, hist_edges, power=1.0 / 3.0)\n else:\n raise ValueError(\"Invalid scenario. Must be in {'same', 'p3'}.\")\n thresholds_dict[tensor] = TensorData(avg=avg_coef, std=std_coef, hist=hist, hist_edges=hist_edges)\n\n # Plot histogram for debug only\n if os.environ.get(\"QUANTIZATION_DEBUG\", 0) in (1, \"1\"):\n apply_plot(hist, hist_edges)\n\n return thresholds_dict\n\n def get_entropy_threshold(self, histogram, num_quantized_bins):\n \"\"\"Given a dataset, find the optimal threshold for quantizing it.\n The reference distribution is `q`, and the candidate distribution is `p`.\n `q` is a truncated version of the original distribution.\n Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf\n \"\"\"\n import copy\n\n from scipy.stats import entropy\n\n hist = histogram[0]\n hist_edges = histogram[1]\n num_bins = hist.size\n zero_bin_index = num_bins // 2\n num_half_quantized_bin = num_quantized_bins // 2\n\n kl_divergence = np.zeros(zero_bin_index - num_half_quantized_bin + 1)\n thresholds = [(0, 0) for i in range(kl_divergence.size)]\n\n # <------------ num bins ---------------->\n # <--- quantized bins ---->\n # |======|===========|===========|=======|\n # zero bin index\n # ^ ^\n # | |\n # start index end index (start of iteration)\n # ^ ^\n # | |\n # start index end index ...\n # ^ ^\n # | |\n # start index end index (end of iteration)\n\n for i in range(num_half_quantized_bin, zero_bin_index + 1, 1):\n start_index = zero_bin_index - i\n end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins\n\n thresholds[i - num_half_quantized_bin] = (\n float(hist_edges[start_index]),\n float(hist_edges[end_index]),\n )\n\n sliced_distribution = copy.deepcopy(hist[start_index:end_index])\n\n # reference distribution p\n p = sliced_distribution.copy() # a copy of np array\n left_outliers_count = sum(hist[:start_index])\n right_outliers_count = sum(hist[end_index:])\n p[0] += left_outliers_count\n p[-1] += right_outliers_count\n\n # nonzeros[i] incidates whether p[i] is non-zero\n nonzeros = (p != 0).astype(np.int64)\n\n # quantize p.size bins into quantized bins (default 128 bins)\n quantized_bins = np.zeros(num_quantized_bins, dtype=np.int64)\n num_merged_bins = sliced_distribution.size // num_quantized_bins\n\n # merge bins into quantized bins\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n quantized_bins[index] = sum(sliced_distribution[start:end])\n quantized_bins[-1] += sum(sliced_distribution[num_quantized_bins * num_merged_bins :])\n\n # in order to compare p and q, we need to make length of q equals to length of p\n # expand quantized bins into p.size bins\n q = np.zeros(p.size, dtype=np.int64)\n for index in range(num_quantized_bins):\n start = index * num_merged_bins\n end = start + num_merged_bins\n\n norm = sum(nonzeros[start:end])\n if norm != 0:\n q[start:end] = float(quantized_bins[index]) / 
float(norm)\n\n p = smooth_distribution(p)\n q = smooth_distribution(q)\n\n if isinstance(q, np.ndarray):\n kl_divergence[i - num_half_quantized_bin] = entropy(p, q)\n else:\n kl_divergence[i - num_half_quantized_bin] = float(\"inf\")\n\n min_kl_divergence_idx = np.argmin(kl_divergence)\n optimal_threshold = thresholds[min_kl_divergence_idx]\n min_value = histogram[2]\n max_value = histogram[3]\n if optimal_threshold[0] < min_value:\n optimal_threshold = (min_value, optimal_threshold[1])\n if optimal_threshold[1] > max_value:\n optimal_threshold = (optimal_threshold[0], max_value)\n return optimal_threshold\n\n\ndef create_calibrator(\n model: Union[str, Path],\n op_types_to_calibrate: Optional[Sequence[str]] = None,\n augmented_model_path=\"augmented_model.onnx\",\n calibrate_method=CalibrationMethod.MinMax,\n use_external_data_format=False,\n extra_options={}, # noqa: B006\n):\n calibrator = None\n if calibrate_method == CalibrationMethod.MinMax:\n # default settings for min-max algorithm\n symmetric = False if \"symmetric\" not in extra_options else extra_options[\"symmetric\"]\n moving_average = False if \"moving_average\" not in extra_options else extra_options[\"moving_average\"]\n averaging_constant = 0.01 if \"averaging_constant\" not in extra_options else extra_options[\"averaging_constant\"]\n calibrator = MinMaxCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n moving_average=moving_average,\n averaging_constant=averaging_constant,\n )\n elif calibrate_method == CalibrationMethod.Entropy:\n # default settings for entropy algorithm\n num_bins = 128 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n num_quantized_bins = 128 if \"num_quantized_bins\" not in extra_options else extra_options[\"num_quantized_bins\"]\n symmetric = False if \"symmetric\" not in extra_options else extra_options[\"symmetric\"]\n calibrator = EntropyCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n num_bins=num_bins,\n num_quantized_bins=num_quantized_bins,\n )\n elif calibrate_method == CalibrationMethod.Percentile:\n # default settings for percentile algorithm\n num_bins = 2048 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n percentile = 99.999 if \"percentile\" not in extra_options else extra_options[\"percentile\"]\n symmetric = True if \"symmetric\" not in extra_options else extra_options[\"symmetric\"]\n calibrator = PercentileCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n symmetric=symmetric,\n num_bins=num_bins,\n percentile=percentile,\n )\n\n elif calibrate_method == CalibrationMethod.Distribution:\n # default settings for percentile algorithm\n num_bins = 2048 if \"num_bins\" not in extra_options else extra_options[\"num_bins\"]\n scenario = \"same\" if \"scenario\" not in extra_options else extra_options[\"scenario\"]\n\n calibrator = DistributionCalibrater(\n model,\n op_types_to_calibrate,\n augmented_model_path,\n use_external_data_format=use_external_data_format,\n num_bins=num_bins,\n scenario=scenario,\n )\n\n if calibrator:\n calibrator.augment_graph()\n calibrator.create_inference_session()\n return calibrator\n\n raise ValueError(f\"Unsupported calibration method {calibrate_method}\")\n",
"step-ids": [
46,
56,
59,
60,
68
]
}
|
[
46,
56,
59,
60,
68
] |
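The calibraters in the record above are driven through create_calibrator plus a data reader. A minimal sketch, assuming the CalibrationDataReader base referenced by collect_data exposes get_next() (one feed dict per batch, then None when exhausted); the model path, input name, and shape are placeholders, not part of the record:

import numpy as np

class RandomReader(CalibrationDataReader):
    """Feeds a fixed number of random batches, then signals exhaustion."""

    def __init__(self, input_name, shape, batches=8):
        self.input_name, self.shape, self.batches = input_name, shape, batches

    def get_next(self):
        if self.batches == 0:
            return None  # a falsy value stops the collect_data loop above
        self.batches -= 1
        return {self.input_name: np.random.rand(*self.shape).astype(np.float32)}

# create_calibrator already augments the graph and builds the inference session
calibrator = create_calibrator(
    "model.onnx",
    calibrate_method=CalibrationMethod.MinMax,
    extra_options={"moving_average": True, "averaging_constant": 0.01},
)
calibrator.collect_data(RandomReader("input", (1, 3, 224, 224)))
ranges = calibrator.compute_data()  # TensorsData mapping tensor name -> (min, max)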
import unittest
import numpy
import set_solver
class TestSets(unittest.TestCase):
def test_is_set(self):
"""Test set validator (Exercise 3a)."""
cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,
2], [0, 1, 2, 2, 2]])
self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))
self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))
self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))
def test_find_sets(self):
"""Test solver (Exercise 3b)."""
cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,
2], [0, 1, 2, 2, 2]])
set_indices = set_solver.find_sets(cards)
self.assertEqual(len(set_indices), 2)
self.assertTrue((0, 1, 2) in set_indices)
self.assertTrue((2, 3, 4) in set_indices)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "6065fae2a11f6b525ef10346e297505ec9d4e9d5",
"index": 8550,
"step-1": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nimport numpy\nimport set_solver\n\n\nclass TestSets(unittest.TestCase):\n\n def test_is_set(self):\n \"\"\"Test set validator (Exercise 3a).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n self.assertTrue(set_solver.is_set(cards, [0, 1, 2]))\n self.assertFalse(set_solver.is_set(cards, [0, 1, 3]))\n self.assertTrue(set_solver.is_set(cards, [2, 3, 4]))\n\n def test_find_sets(self):\n \"\"\"Test solver (Exercise 3b).\"\"\"\n cards = numpy.array([[1, 1, 1, 2, 0], [0, 1, 2, 2, 2], [0, 1, 2, 2,\n 2], [0, 1, 2, 2, 2]])\n set_indices = set_solver.find_sets(cards)\n self.assertEqual(len(set_indices), 2)\n self.assertTrue((0, 1, 2) in set_indices)\n self.assertTrue((2, 3, 4) in set_indices)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
4,
5
]
}
|
[
2,
3,
4,
5
] |
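The record above ships only the tests; the set_solver module they import is external. One possible implementation consistent with those tests, assuming cards holds one feature per row and one card per column, and that a valid set has every feature either all-equal or all-distinct across the trio:

import itertools

def is_set(cards, indices):
    trio = cards[:, list(indices)]
    # each row is one feature's values for the three chosen cards
    return all(len(set(row)) in (1, 3) for row in trio)

def find_sets(cards):
    num_cards = cards.shape[1]
    return [combo for combo in itertools.combinations(range(num_cards), 3)
            if is_set(cards, combo)]

On the 4x5 test array this yields exactly the two expected sets, (0, 1, 2) and (2, 3, 4).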
from typing import Any, Dict
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, has_request_variables, webhook_view
from zerver.lib.response import json_success
from zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message
from zerver.models import UserProfile
FRESHPING_TOPIC_TEMPLATE_TEST = "Freshping"
FRESHPING_TOPIC_TEMPLATE = "{check_name}"
FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = """
{request_url} has just become unreachable.
Error code: {http_status_code}.
""".strip()
FRESHPING_MESSAGE_TEMPLATE_UP = "{request_url} is back up and no longer unreachable."
@webhook_view("Freshping")
@has_request_variables
def api_freshping_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: Dict[str, Any] = REQ(argument_type="body"),
) -> HttpResponse:
body = get_body_for_http_request(payload)
subject = get_subject_for_http_request(payload)
check_send_webhook_message(request, user_profile, subject, body)
return json_success()
def get_subject_for_http_request(payload: Dict[str, Any]) -> str:
webhook_event_data = payload["webhook_event_data"]
if webhook_event_data["application_name"] == "Webhook test":
subject = FRESHPING_TOPIC_TEMPLATE_TEST
else:
subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=webhook_event_data["check_name"])
return subject
def get_body_for_http_request(payload: Dict[str, Any]) -> str:
webhook_event_data = payload["webhook_event_data"]
if webhook_event_data["check_state_name"] == "Reporting Error":
body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**webhook_event_data)
elif webhook_event_data["check_state_name"] == "Available":
if webhook_event_data["application_name"] == "Webhook test":
body = get_setup_webhook_message("Freshping")
else:
body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)
return body
|
normal
|
{
"blob_id": "f60d02fb14364fb631d87fcf535b2cb5782e728f",
"index": 6539,
"step-1": "<mask token>\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-3": "<mask token>\nFRESHPING_TOPIC_TEMPLATE_TEST = 'Freshping'\nFRESHPING_TOPIC_TEMPLATE = '{check_name}'\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = (\n \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\"\n .strip())\nFRESHPING_MESSAGE_TEMPLATE_UP = (\n '{request_url} is back up and no longer unreachable.')\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-4": "from typing import Any, Dict\nfrom django.http import HttpRequest, HttpResponse\nfrom zerver.decorator import REQ, has_request_variables, webhook_view\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message\nfrom zerver.models import UserProfile\nFRESHPING_TOPIC_TEMPLATE_TEST = 'Freshping'\nFRESHPING_TOPIC_TEMPLATE = '{check_name}'\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = (\n \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\"\n .strip())\nFRESHPING_MESSAGE_TEMPLATE_UP = (\n '{request_url} is back up and no longer unreachable.')\n\n\n@webhook_view('Freshping')\n@has_request_variables\ndef api_freshping_webhook(request: HttpRequest, user_profile: UserProfile,\n payload: Dict[str, Any]=REQ(argument_type='body')) ->HttpResponse:\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['application_name'] == 'Webhook test':\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=\n webhook_event_data['check_name'])\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) ->str:\n webhook_event_data = payload['webhook_event_data']\n if webhook_event_data['check_state_name'] == 'Reporting Error':\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**\n webhook_event_data)\n elif webhook_event_data['check_state_name'] == 'Available':\n if webhook_event_data['application_name'] == 'Webhook test':\n body = get_setup_webhook_message('Freshping')\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n return body\n",
"step-5": "from typing import Any, Dict\n\nfrom django.http import HttpRequest, HttpResponse\n\nfrom zerver.decorator import REQ, has_request_variables, webhook_view\nfrom zerver.lib.response import json_success\nfrom zerver.lib.webhooks.common import check_send_webhook_message, get_setup_webhook_message\nfrom zerver.models import UserProfile\n\nFRESHPING_TOPIC_TEMPLATE_TEST = \"Freshping\"\nFRESHPING_TOPIC_TEMPLATE = \"{check_name}\"\n\nFRESHPING_MESSAGE_TEMPLATE_UNREACHABLE = \"\"\"\n{request_url} has just become unreachable.\nError code: {http_status_code}.\n\"\"\".strip()\nFRESHPING_MESSAGE_TEMPLATE_UP = \"{request_url} is back up and no longer unreachable.\"\n\n\n@webhook_view(\"Freshping\")\n@has_request_variables\ndef api_freshping_webhook(\n request: HttpRequest,\n user_profile: UserProfile,\n payload: Dict[str, Any] = REQ(argument_type=\"body\"),\n) -> HttpResponse:\n\n body = get_body_for_http_request(payload)\n subject = get_subject_for_http_request(payload)\n\n check_send_webhook_message(request, user_profile, subject, body)\n return json_success()\n\n\ndef get_subject_for_http_request(payload: Dict[str, Any]) -> str:\n webhook_event_data = payload[\"webhook_event_data\"]\n if webhook_event_data[\"application_name\"] == \"Webhook test\":\n subject = FRESHPING_TOPIC_TEMPLATE_TEST\n else:\n subject = FRESHPING_TOPIC_TEMPLATE.format(check_name=webhook_event_data[\"check_name\"])\n\n return subject\n\n\ndef get_body_for_http_request(payload: Dict[str, Any]) -> str:\n webhook_event_data = payload[\"webhook_event_data\"]\n if webhook_event_data[\"check_state_name\"] == \"Reporting Error\":\n body = FRESHPING_MESSAGE_TEMPLATE_UNREACHABLE.format(**webhook_event_data)\n elif webhook_event_data[\"check_state_name\"] == \"Available\":\n if webhook_event_data[\"application_name\"] == \"Webhook test\":\n body = get_setup_webhook_message(\"Freshping\")\n else:\n body = FRESHPING_MESSAGE_TEMPLATE_UP.format(**webhook_event_data)\n\n return body\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
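A payload shape that exercises the handler in the record above, inferred only from the keys it reads (illustrative values, not Freshping's documented schema):

payload = {
    "webhook_event_data": {
        "application_name": "My app",           # "Webhook test" would select the setup message
        "check_name": "Landing page",           # becomes the topic for real checks
        "check_state_name": "Reporting Error",  # "Available" selects the back-up template
        "request_url": "https://example.com",
        "http_status_code": 503,
    }
}

assert get_subject_for_http_request(payload) == "Landing page"
print(get_body_for_http_request(payload))
# https://example.com has just become unreachable.
# Error code: 503.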
print("gist test file4")
|
normal
|
{
"blob_id": "ec4725b5b60d10e86b29aab3723917ace5cf52f6",
"index": 8452,
"step-1": "<mask token>\n",
"step-2": "print('gist test file4')\n",
"step-3": "print(\"gist test file4\")",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
class AbstractLayer(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
<|reserved_special_token_0|>
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseManager(models.Manager):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
default_manager = BaseManager
objects = BaseManager
all_objects = models.Manager
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
        We won't be using auto_now and auto_now_add for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseManager(models.Manager):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
default_manager = BaseManager
objects = BaseManager
all_objects = models.Manager
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
        We won't be using auto_now and auto_now_add for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class BaseManager(models.Manager):
<|reserved_special_token_0|>
use_for_related_fields = True
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
default_manager = BaseManager
objects = BaseManager
all_objects = models.Manager
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) ->(object or None):
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
return None
@classmethod
def filter(cls, *args, **kwargs) ->models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) ->None:
"""
        We won't be using auto_now and auto_now_add for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE,
related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=
'forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text=
'Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
<|reserved_special_token_1|>
from django.contrib.postgres.fields import JSONField
from django.db import models
from core.utils.time import get_now
class BaseManager(models.Manager):
"""
Our basic manager is used to order all child models of AbstractLayer
by created time (descending), therefore it creates a LIFO order,
    causing the recent ones to appear first in results.
"""
use_for_related_fields = True
def get_queryset(self):
        return super(BaseManager, self).get_queryset().order_by('-created_time')
class AbstractLayer(models.Model):
"""
All basic abstraction is done here.
Also, we'll implement some methods which will simplify the work with models.
"""
# let's configure managers
default_manager = BaseManager
objects = BaseManager
all_objects = models.Manager
# All objects in our database are gonna have time of creation and last updated time.
created_time = models.DateTimeField(default=get_now)
last_updated_time = models.DateTimeField(default=get_now)
@classmethod
def get(cls, *args, **kwargs) -> object or None:
"""
We use our custom get method to avoid errors (like Not Found).
This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).
:param args:
:param kwargs:
:return: object of model
"""
try:
return cls.objects.get(*args, **kwargs)
except cls.DoesNotExist:
            # if the object does not exist, we use None
return None
@classmethod
def filter(cls, *args, **kwargs) -> models.QuerySet:
"""
Just to reduce the model.objects.filter to model.filter
:param args:
:param kwargs:
:return: QuerySet
"""
return cls.objects.filter(*args, **kwargs)
@classmethod
def all(cls):
"""
Shortcut for model.objects.all
"""
return cls.objects.all()
def save(self, *args, **kwargs) -> None:
"""
        We won't be using auto_now and auto_now_add for created_time and last_updated_time,
since they might cause unintentional errors in future.
Instead we implement custom save method to update them.
:param args:
:param kwargs:
:return: None
"""
self.last_updated_time = get_now()
super(AbstractLayer, self).save(*args, **kwargs)
@classmethod
def create(cls, *args, **kwargs):
"""
Since we are not using auto fields for created_time,
we will be implementing our custom create method to take care of that.
Also, we reduce model.objects.create to model.create.
:param args:
:param kwargs:
:return: created object
"""
now = get_now()
obj = cls(
*args,
**kwargs,
created_time=now,
last_updated_time=now
)
obj.save()
return obj
class Meta:
abstract = True
class Country(AbstractLayer):
code = models.CharField(max_length=7, unique=True)
def __str__(self):
return self.code
class Meta:
db_table = 'countries'
class City(AbstractLayer):
name = models.CharField(max_length=255)
state = models.CharField(max_length=255)
country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='cities')
lon = models.CharField(max_length=31)
lat = models.CharField(max_length=31)
def __str__(self):
return self.name
class Meta:
db_table = 'cities'
class Forecast(AbstractLayer):
city = models.ForeignKey(City, on_delete=models.CASCADE, related_name='forecasts')
detailed_status = models.CharField(max_length=1023, blank=True, null=True)
data = JSONField(blank=True, null=True, help_text='Whole JSON data representing the forecast details')
time = models.DateTimeField()
class Meta:
db_table = 'forecasts'
|
flexible
|
{
"blob_id": "5a33aeffa740a41bd0bd1d80f45796ae37377a4c",
"index": 757,
"step-1": "<mask token>\n\n\nclass AbstractLayer(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n <mask token>\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-2": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-3": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-4": "<mask token>\n\n\nclass BaseManager(models.Manager):\n <mask token>\n use_for_related_fields = True\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) ->(object or None):\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) ->models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) ->None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(*args, **kwargs, created_time=now, last_updated_time=now)\n obj.save()\n return obj\n\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE,\n related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name=\n 'forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text=\n 'Whole JSON data representing the forecast details')\n time = models.DateTimeField()\n\n\n class Meta:\n db_table = 'forecasts'\n",
"step-5": "from django.contrib.postgres.fields import JSONField\nfrom django.db import models\n\nfrom core.utils.time import get_now\n\n\nclass BaseManager(models.Manager):\n \"\"\"\n Our basic manager is used to order all child models of AbstractLayer\n by created time (descending), therefore it creates a LIFO order,\n causing the recent ones appear first in results.\n \"\"\"\n use_for_related_fields = True\n\n def get_queryset(self):\n super(BaseManager, self).get_queryset().order_by('-created_time')\n\n\nclass AbstractLayer(models.Model):\n \"\"\"\n All basic abstraction is done here.\n Also, we'll implement some methods which will simplify the work with models.\n \"\"\"\n\n # let's configure managers\n default_manager = BaseManager\n objects = BaseManager\n all_objects = models.Manager\n\n # All objects in our database are gonna have time of creation and last updated time.\n created_time = models.DateTimeField(default=get_now)\n last_updated_time = models.DateTimeField(default=get_now)\n\n @classmethod\n def get(cls, *args, **kwargs) -> object or None:\n \"\"\"\n We use our custom get method to avoid errors (like Not Found).\n This way we won't have to use try/except for the rest of our codebase (at least for non-existing objects).\n :param args:\n :param kwargs:\n :return: object of model\n \"\"\"\n try:\n return cls.objects.get(*args, **kwargs)\n except cls.DoesNotExist:\n # if objects does not exist, we use None\n return None\n\n @classmethod\n def filter(cls, *args, **kwargs) -> models.QuerySet:\n \"\"\"\n Just to reduce the model.objects.filter to model.filter\n :param args:\n :param kwargs:\n :return: QuerySet\n \"\"\"\n return cls.objects.filter(*args, **kwargs)\n\n @classmethod\n def all(cls):\n \"\"\"\n Shortcut for model.objects.all\n \"\"\"\n return cls.objects.all()\n\n def save(self, *args, **kwargs) -> None:\n \"\"\"\n We won't be using auto_now and auto_add_now for created_time and last_updated_time,\n since they might cause unintentional errors in future.\n Instead we implement custom save method to update them.\n :param args:\n :param kwargs:\n :return: None\n \"\"\"\n self.last_updated_time = get_now()\n super(AbstractLayer, self).save(*args, **kwargs)\n\n @classmethod\n def create(cls, *args, **kwargs):\n \"\"\"\n Since we are not using auto fields for created_time,\n we will be implementing our custom create method to take care of that.\n Also, we reduce model.objects.create to model.create.\n :param args:\n :param kwargs:\n :return: created object\n \"\"\"\n now = get_now()\n obj = cls(\n *args,\n **kwargs,\n created_time=now,\n last_updated_time=now\n )\n obj.save()\n return obj\n\n class Meta:\n abstract = True\n\n\nclass Country(AbstractLayer):\n code = models.CharField(max_length=7, unique=True)\n\n def __str__(self):\n return self.code\n\n class Meta:\n db_table = 'countries'\n\n\nclass City(AbstractLayer):\n name = models.CharField(max_length=255)\n state = models.CharField(max_length=255)\n country = models.ForeignKey(Country, on_delete=models.CASCADE, related_name='cities')\n lon = models.CharField(max_length=31)\n lat = models.CharField(max_length=31)\n\n def __str__(self):\n return self.name\n\n class Meta:\n db_table = 'cities'\n\n\nclass Forecast(AbstractLayer):\n city = models.ForeignKey(City, on_delete=models.CASCADE, related_name='forecasts')\n detailed_status = models.CharField(max_length=1023, blank=True, null=True)\n data = JSONField(blank=True, null=True, help_text='Whole JSON data representing the forecast details')\n time = 
models.DateTimeField()\n\n class Meta:\n db_table = 'forecasts'\n",
"step-ids": [
12,
17,
18,
19,
22
]
}
|
[
12,
17,
18,
19,
22
] |
seq = input('write a sequence of numbers: ')
print(seq.split(','))
print(tuple(seq.split(',')))
|
normal
|
{
"blob_id": "be867d600f5f267986368f5573006f63004dbf9e",
"index": 5094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(seq.split(','))\nprint(tuple(seq.split(',')))\n",
"step-3": "seq = input('write a sequence of numbers: ')\nprint(seq.split(','))\nprint(tuple(seq.split(',')))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |