| index (int64, 0-100k) | blob_id (string, length 40) | code (string, 7-7.27M chars) | steps (list, 1-1.25k items) | error (bool, 2 classes) |
|---|---|---|---|---|
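The summary row above describes one record per source file. As a reading aid, here is a minimal sketch of that per-row schema in Python; the field names come from the column headers, the types from the dtype annotations, and the `Row` name itself is only illustrative:

```python
from typing import List, TypedDict


class Row(TypedDict):
    """One dataset record, mirroring the columns summarized above (the name `Row` is illustrative)."""
    index: int        # int64, values 0 to 100k
    blob_id: str      # 40-character blob identifier
    code: str         # raw source text, 7 to 7.27M characters
    steps: List[str]  # 1 to 1.25k code strings derived from `code`
    error: bool       # 2 classes (True / False)
```

In the rows reproduced below, `steps` holds the original source followed by variants in which definitions are progressively replaced by placeholders such as `<assignment token>`, `<function token>`, and `<class token>`.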
500 |
040942e2e09b5c2df5c08207b9c033471b117608
|
from flask import Flask, url_for, render_template, request
import os
import blescan
import sys
import requests
import logging
from logging.handlers import RotatingFileHandler
import json
from datetime import datetime
import bluetooth._bluetooth as bluez
app = Flask(__name__)
@app.route('/sivut/')
def default_page():
dev_id = 0
try:
sock = bluez.hci_open_dev(dev_id)
app.logger.info("ble thread started")
except:
app.logger.info("error accessing bluetooth device...")
sys.exit(1)
blescan.hci_le_set_scan_parameters(sock)
blescan.hci_enable_le_scan(sock)
returnedList = blescan.parse_events(sock, 10)
app.logger.info(returnedList)
print "----------"
setti = set()
stop_name = ""
for beacon in returnedList:
if '2f234454cf6d4a0fadf2f4911ba9ffa6' in beacon:
app.logger.info("beacon loydetty")
r = requests.get("http://stop2.herokuapp.com/stop/2f234454-cf6d-4a0f-adf2-f4911ba9ffa6")
content = r.content
content = json.loads(content)
stop_name = content['stop_name']
palautus = "<h3>Press button to stop bus:</h3> "
for asd in content['schedule']:
setti.add(asd['line'])
arrival = datetime.fromtimestamp(int(asd['arrival'])).strftime('%H:%M')
palautus += " <div class='btn btn-lg stop_bus' style='margin:5px;color:white;background:#F092CD;' id='" + asd['line'] + "'>" + asd['line'] + " " + arrival \
+ "</div> "
content = palautus
break
else:
content = "<h3>You're not near stop</h3>"
app.logger.info("beacon EI loydetty")
return render_template('index_templatelocal.html', content=content, setti=setti, stop_name=stop_name)
@app.route('/stops')
def show_stops():
stops = '''
{"name": "718 to Rautatientori (HSL:1020201)", "stops": [
{"code": "3032", "name": "Valtimontie", "gtfsId": "HSL:1240123"},
{"code": "3030", "name": "Sumatrantie", "gtfsId": "HSL:1240106"},
{"code": "3028", "name": "Kumpulan kampus", "gtfsId": "HSL:1240118"},
{"code": "3024", "name": "Vallilan varikko", "gtfsId": "HSL:1220104"},
{"code": "3022", "name": "Ristikkokatu", "gtfsId": "HSL:1220102"},
{"code": "2410", "name": "S\u00f6rn\u00e4inen(M)", "gtfsId": "HSL:1113131"},
{"code": "2404", "name": "Haapaniemi", "gtfsId": "HSL:1112126"},
{"code": "2402", "name": "Hakaniemi", "gtfsId": "HSL:1111114"},
{"code": null, "name": "Rautatientori", "gtfsId": "HSL:1020201"}]}
'''
return render_template('show_stops.html', stops=json.loads(stops))
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5050))
handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.run(host='0.0.0.0', port = port)
|
[
" \nfrom flask import Flask, url_for, render_template, request\nimport os\nimport blescan\nimport sys\nimport requests\nimport logging\nfrom logging.handlers import RotatingFileHandler\nimport json\nfrom datetime import datetime\n\nimport bluetooth._bluetooth as bluez\n\n\napp = Flask(__name__)\n\n\[email protected]('/sivut/')\ndef default_page():\n dev_id = 0\n try:\n sock = bluez.hci_open_dev(dev_id)\n app.logger.info(\"ble thread started\")\n except:\n app.logger.info(\"error accessing bluetooth device...\")\n sys.exit(1)\n\n blescan.hci_le_set_scan_parameters(sock)\n blescan.hci_enable_le_scan(sock)\n\n returnedList = blescan.parse_events(sock, 10)\n app.logger.info(returnedList)\n print \"----------\"\n setti = set()\n stop_name = \"\"\n for beacon in returnedList:\n if '2f234454cf6d4a0fadf2f4911ba9ffa6' in beacon:\n app.logger.info(\"beacon loydetty\")\n r = requests.get(\"http://stop2.herokuapp.com/stop/2f234454-cf6d-4a0f-adf2-f4911ba9ffa6\")\n content = r.content\n content = json.loads(content)\n stop_name = content['stop_name']\n palautus = \"<h3>Press button to stop bus:</h3> \"\n for asd in content['schedule']:\n setti.add(asd['line'])\n arrival = datetime.fromtimestamp(int(asd['arrival'])).strftime('%H:%M')\n palautus += \" <div class='btn btn-lg stop_bus' style='margin:5px;color:white;background:#F092CD;' id='\" + asd['line'] + \"'>\" + asd['line'] + \" \" + arrival \\\n + \"</div> \"\n content = palautus\n break\n else:\n content = \"<h3>You're not near stop</h3>\"\n app.logger.info(\"beacon EI loydetty\")\n return render_template('index_templatelocal.html', content=content, setti=setti, stop_name=stop_name)\n\n\[email protected]('/stops')\ndef show_stops():\n stops = '''\n {\"name\": \"718 to Rautatientori (HSL:1020201)\", \"stops\": [\n {\"code\": \"3032\", \"name\": \"Valtimontie\", \"gtfsId\": \"HSL:1240123\"},\n {\"code\": \"3030\", \"name\": \"Sumatrantie\", \"gtfsId\": \"HSL:1240106\"},\n {\"code\": \"3028\", \"name\": \"Kumpulan kampus\", \"gtfsId\": \"HSL:1240118\"},\n {\"code\": \"3024\", \"name\": \"Vallilan varikko\", \"gtfsId\": \"HSL:1220104\"},\n {\"code\": \"3022\", \"name\": \"Ristikkokatu\", \"gtfsId\": \"HSL:1220102\"},\n {\"code\": \"2410\", \"name\": \"S\\u00f6rn\\u00e4inen(M)\", \"gtfsId\": \"HSL:1113131\"},\n {\"code\": \"2404\", \"name\": \"Haapaniemi\", \"gtfsId\": \"HSL:1112126\"},\n {\"code\": \"2402\", \"name\": \"Hakaniemi\", \"gtfsId\": \"HSL:1111114\"},\n {\"code\": null, \"name\": \"Rautatientori\", \"gtfsId\": \"HSL:1020201\"}]}\n '''\n return render_template('show_stops.html', stops=json.loads(stops))\n\n\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5050))\n handler = RotatingFileHandler('foo.log', maxBytes=10000, backupCount=1)\n handler.setLevel(logging.INFO)\n app.logger.addHandler(handler)\n app.run(host='0.0.0.0', port = port)\n\n\n"
] | true |
501 |
9ab3dd87f17ac75a3831e9ec1f0746ad81fad70d
|
# Any object containing execute(self) method is considered to be IDE App
# this is Duck typing concept
class PyCharm:
def execute(self):
print("pycharm ide runnig")
class MyIde:
def execute(self):
print("MyIde running")
class Laptop:
def code(self,ide):
ide.execute()
ide=MyIde()
obj=Laptop()
obj.code(ide)
|
[
"\r\n# Any object containing execute(self) method is considered to be IDE App\r\n# this is Duck typing concept\r\n\r\nclass PyCharm:\r\n def execute(self):\r\n print(\"pycharm ide runnig\")\r\n\r\nclass MyIde:\r\n def execute(self):\r\n print(\"MyIde running\")\r\n\r\nclass Laptop:\r\n\r\n def code(self,ide):\r\n ide.execute()\r\n\r\nide=MyIde()\r\n\r\nobj=Laptop()\r\n\r\nobj.code(ide)\r\n",
"class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\nide = MyIde()\nobj = Laptop()\nobj.code(ide)\n",
"class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\nobj.code(ide)\n",
"class PyCharm:\n\n def execute(self):\n print('pycharm ide runnig')\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\n<code token>\n",
"class PyCharm:\n <function token>\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass MyIde:\n\n def execute(self):\n print('MyIde running')\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\n<code token>\n",
"<class token>\n\n\nclass MyIde:\n <function token>\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<class token>\n\n\nclass Laptop:\n\n def code(self, ide):\n ide.execute()\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<class token>\n\n\nclass Laptop:\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<class token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
502 |
6d61df9ac072100d01a1ce3cf7b4c056f66a163c
|
import pygame
import sys
import time
import random
from snake_gym.envs.modules import *
from pygame.locals import *
import numpy as np
class SnakeGame(object):
def __init__(self):
self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
self.surface = pygame.Surface(self.screen.get_size())
self.surface = self.surface.convert()
self.surface.fill((255, 255, 255))
self.clock = pygame.time.Clock()
self.fps = 60
self.done = False
pygame.key.set_repeat(1, 40)
self.screen.blit(self.surface, (0, 0))
pygame.init()
self.fpsClock = pygame.time.Clock()
self.snake = Snake()
self.apple = Apple()
def reset(self):
return SnakeGame._get_image(self.surface)
def step(self, key):
length = self.snake.length
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
self.done = True
act = [UP, DOWN, LEFT, RIGHT]
self.snake.point(act[key])
self.surface.fill((255, 255, 255))
try:
self.snake.move()
except SnakeException:
self.done = True
if self.done:
state = SnakeGame._get_image(self.surface)
return state, length, self.done, {}
check_eat(self.snake, self.apple)
self.snake.draw(self.surface)
self.apple.draw(self.surface)
font = pygame.font.Font(None, 36)
text = font.render(str(self.snake.length), 1, (10, 10, 10))
text_pos = text.get_rect()
text_pos.centerx = 20
self.surface.blit(text, text_pos)
self.screen.blit(self.surface, (0, 0))
state = SnakeGame._get_image(self.surface)
pygame.display.flip()
pygame.display.update()
self.fpsClock.tick(self.fps + self.snake.length / 3)
return state, self.snake.length, False, {}
@staticmethod
def _get_image(surface):
ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT, SCREEN_WIDTH))))
for j in range(SCREEN_HEIGHT):
for k in range(SCREEN_WIDTH):
ret[j][k] = surface.get_at((k, j))
return np.array(ret)
|
[
"import pygame\nimport sys\nimport time\nimport random\nfrom snake_gym.envs.modules import *\nfrom pygame.locals import *\nimport numpy as np\n\n\nclass SnakeGame(object):\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n\n pygame.key.set_repeat(1, 40)\n\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT, SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"import pygame\nimport sys\nimport time\nimport random\nfrom snake_gym.envs.modules import *\nfrom pygame.locals import *\nimport numpy as np\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"<import token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n\n @staticmethod\n def _get_image(surface):\n ret = list(map(lambda x: list(x), np.zeros((SCREEN_HEIGHT,\n SCREEN_WIDTH))))\n for j in range(SCREEN_HEIGHT):\n for k in range(SCREEN_WIDTH):\n ret[j][k] = surface.get_at((k, j))\n return np.array(ret)\n",
"<import token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n\n def step(self, key):\n length = self.snake.length\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n self.done = True\n act = [UP, DOWN, LEFT, RIGHT]\n self.snake.point(act[key])\n self.surface.fill((255, 255, 255))\n try:\n self.snake.move()\n except SnakeException:\n self.done = True\n if self.done:\n state = SnakeGame._get_image(self.surface)\n return state, length, self.done, {}\n check_eat(self.snake, self.apple)\n self.snake.draw(self.surface)\n self.apple.draw(self.surface)\n font = pygame.font.Font(None, 36)\n text = font.render(str(self.snake.length), 1, (10, 10, 10))\n text_pos = text.get_rect()\n text_pos.centerx = 20\n self.surface.blit(text, text_pos)\n self.screen.blit(self.surface, (0, 0))\n state = SnakeGame._get_image(self.surface)\n pygame.display.flip()\n pygame.display.update()\n self.fpsClock.tick(self.fps + self.snake.length / 3)\n return state, self.snake.length, False, {}\n <function token>\n",
"<import token>\n\n\nclass SnakeGame(object):\n\n def __init__(self):\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT),\n 0, 32)\n self.surface = pygame.Surface(self.screen.get_size())\n self.surface = self.surface.convert()\n self.surface.fill((255, 255, 255))\n self.clock = pygame.time.Clock()\n self.fps = 60\n self.done = False\n pygame.key.set_repeat(1, 40)\n self.screen.blit(self.surface, (0, 0))\n pygame.init()\n self.fpsClock = pygame.time.Clock()\n self.snake = Snake()\n self.apple = Apple()\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass SnakeGame(object):\n <function token>\n\n def reset(self):\n return SnakeGame._get_image(self.surface)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass SnakeGame(object):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
503 |
d69bffb85d81ab3969bfe7dfe2759fa809890208
|
# Generated by Django 3.1.1 on 2020-10-07 04:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articals', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='artical',
name='thumb',
field=models.ImageField(blank=True, default='default.png', upload_to='media/'),
),
]
|
[
"# Generated by Django 3.1.1 on 2020-10-07 04:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articals', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='artical',\n name='thumb',\n field=models.ImageField(blank=True, default='default.png', upload_to='media/'),\n ),\n ]\n",
"from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
504 |
2ff85ac059f160fcc6b39b4298e8216cbad77ab3
|
import http.server
import socketserver
from http.server import BaseHTTPRequestHandler, HTTPServer
import time
import json
import io
import urllib
import requests
from lib.Emby_ws import xnoppo_ws
from lib.Emby_http import *
from lib.Xnoppo import *
from lib.Xnoppo_TV import *
import lib.Xnoppo_AVR
import shutil
import asyncio
import threading
import logging
import logging.handlers
import psutil
def get_version():
return("2.01")
def thread_function(ws_object):
print("Thread: starting")
ws_object.start()
print("Thread: finishing")
def restart():
print('restart')
try:
emby_wsocket.stop()
except:
sys.exit()
sys.exit()
print('fin restart')
def save_config(config_file, config):
with open(config_file, 'w') as fw:
json.dump(config, fw, indent=4)
fw.close
try:
emby_wsocket.ws_config=config
emby_wsocket.EmbySession.config=config
except:
emby_wsocket.ws_config=config
def get_state():
status={}
status["Version"]=get_version()
try:
status["Playstate"]=emby_wsocket.EmbySession.playstate
status["playedtitle"]=emby_wsocket.EmbySession.playedtitle
status["server"]=emby_wsocket.EmbySession.server
status["folder"]=emby_wsocket.EmbySession.folder
status["filename"]=emby_wsocket.EmbySession.filename
status["CurrentData"]=emby_wsocket.EmbySession.currentdata
# gives a single float value
except:
status["Playstate"]="Not_Connected"
status["playedtitle"]=""
status["server"]=""
status["folder"]=""
status["filename"]=""
status["CurrentData"]=""
status["cpu_perc"]=psutil.cpu_percent()
status["mem_perc"]=psutil.virtual_memory().percent
# you can have the percentage of used RAM
print(psutil.virtual_memory().percent)
print(status)
return(status)
def cargar_config(config_file,tv_path,av_path,lang_path):
with open(config_file, 'r') as f:
config = json.load(f)
#ver_configuracion(config)
f.close
## new options default config values
config["Version"]=get_version()
default = config.get("Autoscript", False)
config["Autoscript"]=default
default = config.get("enable_all_libraries", False)
config["enable_all_libraries"]=default
default = config.get("TV_model", "")
config["TV_model"]=default
default = config.get("TV_SOURCES", [])
config["TV_SOURCES"] = default
default = config.get("AV_model", "")
config["AV_model"]=default
default = config.get("AV_SOURCES", [])
config["AV_SOURCES"] = default
default = config.get("TV_script_init", "")
config["TV_script_init"]=default
default = config.get("TV_script_end", "")
config["TV_script_end"]=default
default = config.get("av_delay_hdmi", 0)
config["av_delay_hdmi"]=default
default = config.get("AV_Port", 23)
config["AV_Port"]=default
default = config.get("timeout_oppo_mount", 60)
config["timeout_oppo_mount"]=default
default = config.get("language","es-ES")
config["language"]=default
default = config.get("default_nfs",False)
config["default_nfs"]=default
default = config.get("wait_nfs",False)
config["wait_nfs"]=default
default = config.get("refresh_time",5)
config["refresh_time"]=default
default = config.get("check_beta",False)
config["check_beta"]=default
default = config.get("smbtrick",False)
config["smbtrick"]=default
default = config.get("BRDisc",False)
config["BRDisc"]=default
## testeado de rutas
edit_server=0
server_list = config["servers"]
for server in server_list:
default = server.get("Test_OK", False)
server_list[edit_server]["Test_OK"]=default
edit_server=edit_server+1
## Cambio de booleans de texto antiguos a boleans actuales.
if config["TV"]=='True':
config["TV"]=True;
if config["TV"]=='False':
config["TV"]=False;
if config["AV"]=='True':
config["AV"]=True;
if config["AV"]=='False':
config["AV"]=False;
config["servers"]=server_list
config["tv_dirs"]=get_dir_folders(tv_path);
config["av_dirs"]=get_dir_folders(av_path);
config["langs"]=get_dir_folders(lang_path);
return(config)
def check_version(config):
url = "https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js"
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
print(config["check_beta"])
if config["check_beta"]==True:
last_version=version["beta_version"]
last_version_file=version["beta_version_file"]
else:
last_version=version["curr_version"]
last_version_file=version["curr_version_file"]
xno_version=get_version()
resp = {}
resp["version"]=last_version
resp["file"]=last_version_file
print(xno_version)
print(last_version)
if xno_version<last_version:
resp["new_version"]=True
else:
resp["new_version"]=False
print(resp)
return(resp)
def update_version(config,vers_path,cwd):
url = "https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js"
headers = {}
response = requests.get(url, headers=headers)
version = json.loads(response.text)
print(version)
if config["check_beta"]==True:
last_version=version["beta_version"]
last_version_file=version["beta_version_file"]
else:
last_version=version["curr_version"]
last_version_file=version["curr_version_file"]
url2 = "https://github.com/siberian-git/Xnoppo/raw/main/versions/" + last_version_file
headers = {}
response2 = requests.get(url2, headers=headers)
filename=vers_path + last_version_file
with open(filename, 'wb') as f:
f.write(response2.content)
f.close()
shutil.unpack_archive(filename, cwd)
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
if config["TV"]==True and config["TV_model"]!="":
move_files(tv_path + config["TV_model"],lib_path)
if config["AV"]==True and config["AV_model"]!="":
move_files(av_path + config["AV_model"],lib_path)
resp = {}
resp["version"]=last_version
resp["file"]=last_version_file
resp["new_version"]=False
return(resp)
def cargar_lang(config_file):
with open(config_file.encode(sys.getfilesystemencoding()), 'r',encoding='latin-1') as f:
config = json.load(f)
#ver_configuracion(config)
f.close
## new options default config values
return(config)
def leer_file(web_file):
with open(web_file, 'r',encoding='utf8') as f:
num=f.read()
f.close
return(num)
def leer_img(web_file):
with open(web_file, 'rb') as f:
num=f.read()
f.close
return(num)
def test_path(config,server):
rutas = get_mount_path(server["Emby_Path"] + "/test.mkv",server)
result2 = test_mount_path(config,rutas["Servidor"],rutas["Carpeta"])
return(result2)
def get_mount_path(movie,server_data):
movie = movie.replace(server_data["Emby_Path"],server_data["Oppo_Path"])
movie = movie.replace('\\\\','\\')
movie = movie.replace('\\','/')
word = '/'
inicio = movie.find(word)
inicio = inicio +1
final = movie.find(word,inicio,len(movie))
servidor = movie[inicio:final]
ultimo=final+1
result=final+1
while result > 0:
ultimo=result+1
result=movie.find(word,ultimo,len(movie))
fichero=movie[ultimo:len(movie)]
final=final+1
ultimo=ultimo-1
carpeta=movie[final:ultimo]
resultado={}
resultado["Servidor"]=servidor
resultado["Carpeta"]=carpeta
resultado["Fichero"]=fichero
return(resultado)
def test_mount_path(config,servidor,carpeta):
sendnotifyremote(config["Oppo_IP"])
#print("Conectando con el OPPO")
result=check_socket(config)
if result==0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("EJT",config)
time.sleep(1)
#print("Solicitando montar ruta al OPPO")
response_data6b = getsetupmenu(config)
while response_data6f.find('devicelist":[]') > 0:
time.sleep(1)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("QPW",config)
device_list=json.loads(response_data6f)
if config["DebugLevel"]>0: print(device_list)
nfs=config["default_nfs"]
for device in device_list["devicelist"]:
if device["name"].upper()==servidor.upper():
if device["sub_type"]=="nfs":
nfs=True
break
else:
nfs=False
break
if nfs:
response_login = LoginNFS(config,servidor)
else:
response_login = LoginSambaWithOutID(config,servidor)
if config["Always_ON"]==False:
time.sleep(5)
response_data6b = getsetupmenu(config)
if nfs:
response_mount = mountSharedNFSFolder(servidor,carpeta,'','',config)
else:
response_mount = mountSharedFolder(servidor,carpeta,'','',config)
response=json.loads(response_mount)
#print(response)
if config["Autoscript"]==True:
result=umountSharedFolder(config)
if response["success"]==True:
a = "OK"
else:
a = "FAILURE"
return(a)
else:
print("No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo")
def test_emby(config):
try:
EmbySession=EmbyHttp(config)
user_info = EmbySession.user_info
if user_info["SessionInfo"]["Id"]!="":
return("OK")
else:
return("FAILED")
except:
return("FAILED")
def test_oppo(config):
result=check_socket(config)
if result==0:
return("OK")
else:
return("FAILED")
def carga_libraries(config):
try:
EmbySession=EmbyHttp(config)
views_list=EmbySession.get_user_views(EmbySession.user_info["User"]["Id"])
libraries = []
for view in views_list:
library= {}
library["Name"]=view["Name"]
library["Id"]=view["Id"]
library["Active"]=False
try:
lib_list=config["Libraries"]
except:
lib_list={}
for lib in lib_list:
if lib["Id"]==view["Id"]:
library["Active"]=lib["Active"]
libraries.append(library)
config["Libraries"]=libraries
return(0)
except:
return(1)
def is_library_active(config,libraryname):
for library in config["Libraries"]:
if library["Name"]==libraryname:
return(library["Active"])
return(False)
def get_selectableFolders(config):
EmbySession=EmbyHttp(config)
MediaFolders = EmbySession.get_emby_selectablefolders()
servers=[]
for Folder in MediaFolders:
index=1
active=is_library_active(config,Folder["Name"])
if config["enable_all_libraries"]==True:
active=True;
if active==True:
for SubFolder in Folder["SubFolders"]:
server={}
server["Id"]=SubFolder["Id"]
if index>1:
server["name"]=Folder["Name"]+"("+str(index)+")"
else:
server["name"]=Folder["Name"]
server["Emby_Path"]=SubFolder["Path"]
server["Oppo_Path"]="/"
try:
serv_list=config["servers"]
except:
serv_list={}
for serv in serv_list:
if server["Emby_Path"]==serv["Emby_Path"]:
server["name"]=serv["name"];
server["Oppo_Path"]=serv["Oppo_Path"];
server["Test_OK"]=serv["Test_OK"];
servers.append(server)
index=index+1
config["servers"]=servers
def get_dir_folders(directory):
os.chdir(directory)
dirs = os.listdir(".")
encontrado=False
list_dir=[]
#a =""
#list_dir.append(a)
for x in dirs:
if os.path.isdir(x):
list_dir.append(x)
return(list_dir)
def move_files(src, dest):
os.chdir(src)
src_files = os.listdir('.')
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if os.path.isfile(full_file_name):
shutil.copy(full_file_name, dest)
return(0)
def get_devices(config):
try:
EmbySession=EmbyHttp(config)
devices = EmbySession.get_emby_devices()
index=0
dev_temp = []
for device in devices["Items"]:
try:
if device["Id"]!='Xnoppo':
device["Name"]=device["Name"] + " / " + device["AppName"]
device["Id"]=device["ReportedDeviceId"]
dev_temp.append(device)
except:
pass
config["devices"]=dev_temp
return('OK')
except:
return('FAILURE')
class MyServer(BaseHTTPRequestHandler):
def do_GET(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/emby_conf.html':
i = leer_file(html_path + 'emby_conf.html')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/oppo_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'oppo_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/lib_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'lib_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/path_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'path_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/tv_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'tv_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/av_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'av_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/other_conf.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'other_conf.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/status.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'status.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/help.html':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_file(html_path + 'help.html')
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/remote.html':
i = leer_file(html_path + 'remote.html')
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes(i,"utf-8"))
return(0)
if self.path == '/android-chrome-36x36.png':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'android-chrome-36x36.png')
self.wfile.write(bytes(i))
return(0)
if self.path == '/av-receiver-icon-2.jpg':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'av-receiver-icon-2.jpg')
self.wfile.write(bytes(i))
return(0)
if self.path == '/dragon.png':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
i = leer_img(resource_path + 'dragon.png')
self.wfile.write(bytes(i))
return(0)
if self.path == '/xnoppo_config':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/xnoppo_config_lib':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
carga_libraries(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/xnoppo_config_dev':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
get_devices(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/check_version':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = check_version(config)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/update_version':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = update_version(config,vers_path,cwd)
restart()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/get_state':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = get_state()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/restart':
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
a = "Restarting"
self.wfile.write(bytes(a,"utf-8"))
restart()
if self.path == '/refresh_paths':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
get_selectableFolders(a)
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/lang':
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = cargar_lang(lang_path + config["language"] + separador +'lang.js')
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path.find("/send_key?")>=0:
get_data = self.path
print(get_data)
a = len('/send_key?sendkey=')
b=get_data[a:len(get_data)]
print(b)
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
sendnotifyremote(config["Oppo_IP"])
result=check_socket(config)
if b=='PON':
if result==0:
response_data6a = getmainfirmwareversion(config)
response_data6c = getdevicelist(config)
response_data6b = getsetupmenu(config)
response_data6c = OppoSignin(config)
response_data6d = getdevicelist(config)
response_data6e = getglobalinfo(config)
response_data6f = getdevicelist(config)
response_data_on = sendremotekey("EJT",config)
if config["BRDisc"]==True:
time.sleep(1)
response_data_on = sendremotekey("EJT",config)
time.sleep(1)
response_data6b = getsetupmenu(config)
else:
response_data_on = sendremotekey(b,config)
self.send_response(200)
self.send_header("Content-type", "text")
self.end_headers()
a = "ok"
self.wfile.write(bytes(a,"utf-8"))
return(0)
if self.path == '/log.txt':
self.send_response(200)
self.send_header("Content-type", "text")
self.end_headers()
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')
self.wfile.write(bytes(a))
return(0)
else:
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(bytes("<html><head><title>https://pythonbasics.org</title></head>", "utf-8"))
self.wfile.write(bytes("<p>Request: %s</p>" % self.path, "utf-8"))
self.wfile.write(bytes("<body>", "utf-8"))
self.wfile.write(bytes("<p>This is an example web server.</p>", "utf-8"))
self.wfile.write(bytes("</body></html>", "utf-8"))
def do_POST(self):
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
print(self.path)
if self.path == '/save_config':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
if self.path == '/check_emby':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = test_emby(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
status = get_state()
if status["Playstate"]=="Not_Connected":
save_config(cwd + separador + 'config.json',config)
emby_wsocket.ws_config=config
restart()
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/check_oppo':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = test_oppo(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/test_path':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
server = json.loads(post_data.decode('utf-8'))
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = test_path(config,server)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(server))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(server),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/navigate_path':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
path_obj = json.loads(post_data.decode('utf-8'))
path = path_obj["path"]
config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)
a = navigate_folder(path,config)
a_json=json.dumps(a)
print(len(a_json))
self.send_response(200)
self.send_header("Content-Length", len(a_json))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(a),"utf-8"))
return(0)
if self.path == '/move_tv':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
move_files(tv_path + config["TV_model"],lib_path)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
restart()
return(0)
if self.path == '/move_av':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
save_config(cwd + separador + 'config.json',config)
move_files(av_path + config["AV_model"],lib_path)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
restart()
return(0)
if self.path == '/get_tv_key':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_tv_key(config)
if a == 'OK':
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_conn':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_test_conn(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/get_tv_sources':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_tv_sources(config)
if a == 'OK':
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/get_av_sources':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = get_hdmi_list(config)
if a != None:
config["AV_SOURCES"]=a
save_config(cwd + separador + 'config.json',config)
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_init':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/tv_test_end':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = tv_set_prev(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_on':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_check_power(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_off':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_power_off(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if self.path == '/av_test_hdmi':
content_length = int(self.headers['Content-Length']) # <--- Gets the size of data
post_data = self.rfile.read(content_length) # <--- Gets the data itself
config = json.loads(post_data.decode('utf-8'))
a = av_change_hdmi(config)
if a == 'OK':
self.send_response(200)
self.send_header("Content-Length", len(config))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes(json.dumps(config),"utf-8"))
else:
self.send_response(300)
self.send_header("Content-Length", len("ERROR"))
self.send_header("Content-Type", "text/html")
self.send_header('Access-Control-Allow-Credentials', 'true')
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
self.wfile.write(bytes("ERROR","utf-8"))
return(0)
if __name__ == "__main__":
cwd = os.path.dirname(os.path.abspath(__file__))
if sys.platform.startswith('win'):
separador="\\"
else:
separador="/"
config_file = cwd + separador + "config.json"
resource_path=cwd + separador + 'web' + separador + 'resources' + separador
html_path = cwd + separador + 'web' + separador
tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador
av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador
lib_path = cwd + separador + 'lib' + separador
lang_path = cwd + separador + 'web' + separador + 'lang' + separador
vers_path = cwd + separador + 'versions' + separador
config = cargar_config(config_file,tv_path,av_path,lang_path)
logfile=cwd + separador + "emby_xnoppo_client_logging.log"
lang = cargar_lang(lang_path + config["language"] + separador +'lang.js')
if config["DebugLevel"]==0:
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.CRITICAL)
elif config["DebugLevel"]==1:
rfh = logging.handlers.RotatingFileHandler(
filename=logfile,
mode='a',
maxBytes=50*1024*1024,
backupCount=2,
encoding=None,
delay=0
)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.INFO,handlers=[rfh])
elif config["DebugLevel"]==2:
rfh = logging.handlers.RotatingFileHandler(
filename=logfile,
mode='a',
maxBytes=5*1024*1024,
backupCount=2,
encoding=None,
delay=0
)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.DEBUG,handlers=[rfh])
emby_wsocket = xnoppo_ws()
emby_wsocket.ws_config=config
emby_wsocket.config_file=config_file
emby_wsocket.ws_lang=lang
x = threading.Thread(target=thread_function, args=(emby_wsocket,))
x.start()
espera=0
estado_anterior=''
logging.debug('Arrancamos el Servidor Web\n')
serverPort = 8090
webServer = HTTPServer(("", serverPort), MyServer)
print("Server started http://%s:%s" % ("", serverPort))
try:
webServer.serve_forever()
except KeyboardInterrupt:
pass
webServer.server_close()
logging.info('Fin proceso')
logging.info('Finished')
print("Server stopped.")
|
[
"import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\ndef get_version():\n return(\"2.01\")\n\ndef thread_function(ws_object):\n print(\"Thread: starting\")\n ws_object.start()\n print(\"Thread: finishing\")\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n \ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config=config\n emby_wsocket.EmbySession.config=config\n except:\n emby_wsocket.ws_config=config\ndef get_state():\n status={}\n status[\"Version\"]=get_version()\n try:\n status[\"Playstate\"]=emby_wsocket.EmbySession.playstate\n status[\"playedtitle\"]=emby_wsocket.EmbySession.playedtitle\n status[\"server\"]=emby_wsocket.EmbySession.server\n status[\"folder\"]=emby_wsocket.EmbySession.folder\n status[\"filename\"]=emby_wsocket.EmbySession.filename\n status[\"CurrentData\"]=emby_wsocket.EmbySession.currentdata\n # gives a single float value\n except:\n status[\"Playstate\"]=\"Not_Connected\"\n status[\"playedtitle\"]=\"\"\n status[\"server\"]=\"\"\n status[\"folder\"]=\"\"\n status[\"filename\"]=\"\"\n status[\"CurrentData\"]=\"\"\n status[\"cpu_perc\"]=psutil.cpu_percent()\n status[\"mem_perc\"]=psutil.virtual_memory().percent\n \n # you can have the percentage of used RAM\n print(psutil.virtual_memory().percent)\n\n\n print(status)\n return(status)\n\ndef cargar_config(config_file,tv_path,av_path,lang_path):\n\n with open(config_file, 'r') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n config[\"Version\"]=get_version()\n default = config.get(\"Autoscript\", False)\n config[\"Autoscript\"]=default\n default = config.get(\"enable_all_libraries\", False)\n config[\"enable_all_libraries\"]=default\n default = config.get(\"TV_model\", \"\")\n config[\"TV_model\"]=default\n default = config.get(\"TV_SOURCES\", [])\n config[\"TV_SOURCES\"] = default\n default = config.get(\"AV_model\", \"\")\n config[\"AV_model\"]=default\n default = config.get(\"AV_SOURCES\", [])\n config[\"AV_SOURCES\"] = default\n default = config.get(\"TV_script_init\", \"\")\n config[\"TV_script_init\"]=default\n default = config.get(\"TV_script_end\", \"\")\n config[\"TV_script_end\"]=default\n default = config.get(\"av_delay_hdmi\", 0)\n config[\"av_delay_hdmi\"]=default\n default = config.get(\"AV_Port\", 23)\n config[\"AV_Port\"]=default\n default = config.get(\"timeout_oppo_mount\", 60)\n config[\"timeout_oppo_mount\"]=default\n default = config.get(\"language\",\"es-ES\")\n config[\"language\"]=default\n default = config.get(\"default_nfs\",False)\n config[\"default_nfs\"]=default\n default = config.get(\"wait_nfs\",False)\n config[\"wait_nfs\"]=default\n default = config.get(\"refresh_time\",5)\n config[\"refresh_time\"]=default\n default = config.get(\"check_beta\",False)\n config[\"check_beta\"]=default\n default = config.get(\"smbtrick\",False)\n config[\"smbtrick\"]=default\n default = config.get(\"BRDisc\",False)\n config[\"BRDisc\"]=default\n\n ## testeado de rutas\n edit_server=0\n server_list = 
config[\"servers\"]\n for server in server_list:\n default = server.get(\"Test_OK\", False)\n server_list[edit_server][\"Test_OK\"]=default\n edit_server=edit_server+1\n ## Cambio de booleans de texto antiguos a boleans actuales.\n if config[\"TV\"]=='True':\n config[\"TV\"]=True;\n if config[\"TV\"]=='False':\n config[\"TV\"]=False;\n if config[\"AV\"]=='True':\n config[\"AV\"]=True;\n if config[\"AV\"]=='False':\n config[\"AV\"]=False;\n config[\"servers\"]=server_list\n config[\"tv_dirs\"]=get_dir_folders(tv_path);\n config[\"av_dirs\"]=get_dir_folders(av_path);\n config[\"langs\"]=get_dir_folders(lang_path);\n\n return(config)\n\ndef check_version(config):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config[\"check_beta\"])\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n xno_version=get_version()\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n print(xno_version)\n print(last_version)\n if xno_version<last_version:\n resp[\"new_version\"]=True\n else:\n resp[\"new_version\"]=False\n print(resp)\n return(resp)\n\ndef update_version(config,vers_path,cwd):\n\n url = \"https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js\"\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config[\"check_beta\"]==True:\n last_version=version[\"beta_version\"]\n last_version_file=version[\"beta_version_file\"]\n else:\n last_version=version[\"curr_version\"]\n last_version_file=version[\"curr_version_file\"]\n url2 = \"https://github.com/siberian-git/Xnoppo/raw/main/versions/\" + last_version_file\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename=vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n if config[\"TV\"]==True and config[\"TV_model\"]!=\"\":\n move_files(tv_path + config[\"TV_model\"],lib_path)\n if config[\"AV\"]==True and config[\"AV_model\"]!=\"\":\n move_files(av_path + config[\"AV_model\"],lib_path)\n resp = {}\n resp[\"version\"]=last_version\n resp[\"file\"]=last_version_file\n resp[\"new_version\"]=False\n return(resp)\n\ndef cargar_lang(config_file):\n\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',encoding='latin-1') as f: \n config = json.load(f)\n #ver_configuracion(config)\n f.close\n ## new options default config values\n return(config)\n\ndef leer_file(web_file):\n\n with open(web_file, 'r',encoding='utf8') as f:\n num=f.read()\n f.close\n return(num)\n\ndef leer_img(web_file):\n\n with open(web_file, 'rb') as f:\n num=f.read()\n f.close\n return(num)\n\n\ndef test_path(config,server):\n \n rutas = get_mount_path(server[\"Emby_Path\"] + \"/test.mkv\",server)\n result2 = test_mount_path(config,rutas[\"Servidor\"],rutas[\"Carpeta\"])\n return(result2)\n\ndef get_mount_path(movie,server_data):\n\n movie = 
movie.replace(server_data[\"Emby_Path\"],server_data[\"Oppo_Path\"])\n movie = movie.replace('\\\\\\\\','\\\\')\n movie = movie.replace('\\\\','/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio +1 \n final = movie.find(word,inicio,len(movie))\n servidor = movie[inicio:final]\n ultimo=final+1\n result=final+1\n while result > 0:\n ultimo=result+1\n result=movie.find(word,ultimo,len(movie))\n fichero=movie[ultimo:len(movie)]\n final=final+1\n ultimo=ultimo-1\n carpeta=movie[final:ultimo]\n resultado={}\n resultado[\"Servidor\"]=servidor\n resultado[\"Carpeta\"]=carpeta\n resultado[\"Fichero\"]=fichero\n return(resultado)\n\ndef test_mount_path(config,servidor,carpeta):\n sendnotifyremote(config[\"Oppo_IP\"])\n #print(\"Conectando con el OPPO\")\n result=check_socket(config)\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n #print(\"Solicitando montar ruta al OPPO\")\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"QPW\",config)\n device_list=json.loads(response_data6f)\n if config[\"DebugLevel\"]>0: print(device_list)\n nfs=config[\"default_nfs\"]\n for device in device_list[\"devicelist\"]:\n if device[\"name\"].upper()==servidor.upper():\n if device[\"sub_type\"]==\"nfs\":\n nfs=True\n break\n else:\n nfs=False\n break\n if nfs:\n response_login = LoginNFS(config,servidor)\n else:\n response_login = LoginSambaWithOutID(config,servidor)\n if config[\"Always_ON\"]==False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor,carpeta,'','',config)\n else:\n response_mount = mountSharedFolder(servidor,carpeta,'','',config)\n response=json.loads(response_mount)\n #print(response)\n if config[\"Autoscript\"]==True:\n result=umountSharedFolder(config)\n if response[\"success\"]==True:\n a = \"OK\"\n else:\n a = \"FAILURE\" \n return(a)\n else:\n print(\"No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo\")\n\ndef test_emby(config):\n try:\n EmbySession=EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info[\"SessionInfo\"][\"Id\"]!=\"\":\n return(\"OK\")\n else:\n return(\"FAILED\")\n except:\n return(\"FAILED\")\n\ndef test_oppo(config):\n result=check_socket(config)\n if result==0:\n return(\"OK\")\n else:\n return(\"FAILED\")\n\ndef carga_libraries(config):\n try:\n EmbySession=EmbyHttp(config)\n views_list=EmbySession.get_user_views(EmbySession.user_info[\"User\"][\"Id\"])\n libraries = []\n for view in views_list:\n library= {}\n library[\"Name\"]=view[\"Name\"]\n library[\"Id\"]=view[\"Id\"]\n library[\"Active\"]=False\n try:\n lib_list=config[\"Libraries\"]\n except:\n lib_list={}\n for lib in lib_list:\n if lib[\"Id\"]==view[\"Id\"]:\n library[\"Active\"]=lib[\"Active\"]\n libraries.append(library)\n config[\"Libraries\"]=libraries\n return(0)\n except:\n return(1)\ndef is_library_active(config,libraryname):\n for library in config[\"Libraries\"]:\n if library[\"Name\"]==libraryname:\n return(library[\"Active\"])\n return(False)\n\ndef get_selectableFolders(config):\n EmbySession=EmbyHttp(config)\n 
MediaFolders = EmbySession.get_emby_selectablefolders()\n servers=[]\n for Folder in MediaFolders:\n index=1\n active=is_library_active(config,Folder[\"Name\"])\n if config[\"enable_all_libraries\"]==True:\n active=True;\n if active==True:\n for SubFolder in Folder[\"SubFolders\"]: \n server={}\n server[\"Id\"]=SubFolder[\"Id\"]\n if index>1:\n server[\"name\"]=Folder[\"Name\"]+\"(\"+str(index)+\")\"\n else:\n server[\"name\"]=Folder[\"Name\"]\n server[\"Emby_Path\"]=SubFolder[\"Path\"]\n server[\"Oppo_Path\"]=\"/\"\n try:\n serv_list=config[\"servers\"]\n except:\n serv_list={}\n for serv in serv_list:\n if server[\"Emby_Path\"]==serv[\"Emby_Path\"]:\n server[\"name\"]=serv[\"name\"];\n server[\"Oppo_Path\"]=serv[\"Oppo_Path\"];\n server[\"Test_OK\"]=serv[\"Test_OK\"];\n servers.append(server)\n index=index+1\n config[\"servers\"]=servers\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir(\".\")\n encontrado=False\n list_dir=[]\n #a =\"\"\n #list_dir.append(a)\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return(list_dir)\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return(0)\n\ndef get_devices(config):\n try:\n EmbySession=EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index=0\n dev_temp = []\n for device in devices[\"Items\"]:\n try:\n if device[\"Id\"]!='Xnoppo':\n device[\"Name\"]=device[\"Name\"] + \" / \" + device[\"AppName\"]\n device[\"Id\"]=device[\"ReportedDeviceId\"]\n dev_temp.append(device)\n except:\n pass\n config[\"devices\"]=dev_temp\n return('OK')\n except:\n return('FAILURE')\n\nclass MyServer(BaseHTTPRequestHandler):\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n 
self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(i,\"utf-8\"))\n return(0)\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return(0)\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = update_version(config,vers_path,cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header(\"Content-type\", 
\"application/json\")\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/restart':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n a = \"Restarting\"\n self.wfile.write(bytes(a,\"utf-8\"))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n if self.path == '/lang':\n self.send_response(200)\n self.send_header(\"Content-type\", \"application/json\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n self.wfile.write(bytes(json.dumps(a),\"utf-8\")) \n return(0)\n if self.path.find(\"/send_key?\")>=0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b=get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n sendnotifyremote(config[\"Oppo_IP\"])\n result=check_socket(config)\n if b=='PON':\n if result==0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey(\"EJT\",config)\n if config[\"BRDisc\"]==True:\n time.sleep(1)\n response_data_on = sendremotekey(\"EJT\",config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b,config)\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n a = \"ok\"\n self.wfile.write(bytes(a,\"utf-8\")) \n return(0)\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header(\"Content-type\", \"text\")\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a)) \n return(0)\n else:\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.end_headers()\n self.wfile.write(bytes(\"<html><head><title>https://pythonbasics.org</title></head>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>Request: %s</p>\" % self.path, \"utf-8\"))\n self.wfile.write(bytes(\"<body>\", \"utf-8\"))\n self.wfile.write(bytes(\"<p>This is an example web server.</p>\", \"utf-8\"))\n self.wfile.write(bytes(\"</body></html>\", \"utf-8\"))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n \n print(self.path)\n if self.path == '/save_config':\n content_length = 
int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n status = get_state()\n if status[\"Playstate\"]==\"Not_Connected\":\n save_config(cwd + separador + 'config.json',config)\n emby_wsocket.ws_config=config\n restart()\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = test_path(config,server)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(server))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj[\"path\"]\n config = cargar_config(cwd + separador + 'config.json',tv_path,av_path,lang_path)\n a = navigate_folder(path,config)\n a_json=json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header(\"Content-Length\", len(a_json))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a),\"utf-8\"))\n return(0)\n\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(tv_path + config[\"TV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json',config)\n move_files(av_path + config[\"AV_model\"],lib_path)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n restart()\n return(0)\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = 
tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = get_hdmi_list(config)\n if a != None:\n config[\"AV_SOURCES\"]=a\n save_config(cwd + separador + 'config.json',config)\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 
'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length']) # <--- Gets the size of data\n post_data = self.rfile.read(content_length) # <--- Gets the data itself\n config = json.loads(post_data.decode('utf-8')) \n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header(\"Content-Length\", len(config))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config),\"utf-8\"))\n else:\n self.send_response(300)\n self.send_header(\"Content-Length\", len(\"ERROR\"))\n self.send_header(\"Content-Type\", \"text/html\")\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(\"ERROR\",\"utf-8\"))\n return(0)\nif __name__ == \"__main__\":\n\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador=\"\\\\\"\n else:\n separador=\"/\"\n config_file = cwd + separador + \"config.json\"\n resource_path=cwd + separador + 'web' + separador + 'resources' + separador\n html_path = cwd + separador + 'web' + separador\n tv_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'TV' + separador\n av_path = cwd + separador + 'web' + separador + 'libraries' + separador + 'AV' + separador\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file,tv_path,av_path,lang_path)\n logfile=cwd + separador + \"emby_xnoppo_client_logging.log\"\n lang = cargar_lang(lang_path + config[\"language\"] + separador +'lang.js')\n\n if config[\"DebugLevel\"]==0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.CRITICAL)\n elif config[\"DebugLevel\"]==1:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=50*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.INFO,handlers=[rfh])\n elif config[\"DebugLevel\"]==2:\n rfh = logging.handlers.RotatingFileHandler(\n filename=logfile, \n mode='a',\n maxBytes=5*1024*1024,\n backupCount=2,\n encoding=None,\n delay=0\n )\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',datefmt='%d/%m/%Y %I:%M:%S %p',level=logging.DEBUG,handlers=[rfh])\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config=config\n emby_wsocket.config_file=config_file\n emby_wsocket.ws_lang=lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera=0\n estado_anterior=''\n\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer((\"\", serverPort), MyServer)\n print(\"Server started http://%s:%s\" % (\"\", serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print(\"Server stopped.\")\n",
"import http.server\nimport socketserver\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time\nimport json\nimport io\nimport urllib\nimport requests\nfrom lib.Emby_ws import xnoppo_ws\nfrom lib.Emby_http import *\nfrom lib.Xnoppo import *\nfrom lib.Xnoppo_TV import *\nimport lib.Xnoppo_AVR\nimport shutil\nimport asyncio\nimport threading\nimport logging\nimport logging.handlers\nimport psutil\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if 
config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 
0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) 
+ ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 
'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, 
lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n 
content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if 
self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\nif __name__ == '__main__':\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n config_file = cwd + separador + 'config.json'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file, tv_path, av_path, lang_path)\n logfile = cwd + separador + 'emby_xnoppo_client_logging.log'\n lang = cargar_lang(lang_path + config['language'] + separador + 'lang.js')\n if config['DebugLevel'] == 0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.CRITICAL)\n elif config['DebugLevel'] == 1:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=50 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n 
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO, handlers=[rfh])\n elif config['DebugLevel'] == 2:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.DEBUG, handlers=[rfh]\n )\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config = config\n emby_wsocket.config_file = config_file\n emby_wsocket.ws_lang = lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera = 0\n estado_anterior = ''\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer(('', serverPort), MyServer)\n print('Server started http://%s:%s' % ('', serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print('Server stopped.')\n",
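Editor's note on the step recorded above: its do_GET and do_POST handlers repeat the send_response / send_header / end_headers / wfile.write sequence for every route, and they set Content-Length from len(config), which for a dict is the number of keys rather than the size of the encoded body. The sketch below is illustrative only and is not part of the recorded dataset row; the helper names SketchHandler, _reply_json and _reply_text are hypothetical, while BaseHTTPRequestHandler and HTTPServer are the same standard-library classes the recorded code uses.

import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class SketchHandler(BaseHTTPRequestHandler):

    def _reply_json(self, payload, status=200):
        # Serialize once and derive Content-Length from the encoded body.
        body = json.dumps(payload).encode('utf-8')
        self.send_response(status)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(body)

    def _reply_text(self, text, status=200, content_type='text/html'):
        # Same pattern for plain HTML/text responses.
        body = text.encode('utf-8')
        self.send_response(status)
        self.send_header('Content-Type', content_type)
        self.send_header('Content-Length', str(len(body)))
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self):
        # Each route then reduces to one helper call.
        if self.path == '/get_state':
            self._reply_json({'Playstate': 'Not_Connected'})
        else:
            self._reply_text('<html><body>Not found</body></html>', status=404)


if __name__ == '__main__':
    HTTPServer(('', 8090), SketchHandler).serve_forever()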
"<import token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n 
i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador 
+\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n 
self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\nif __name__ == '__main__':\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n config_file = cwd + separador + 'config.json'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n config = cargar_config(config_file, tv_path, av_path, lang_path)\n logfile = cwd + separador + 'emby_xnoppo_client_logging.log'\n lang = cargar_lang(lang_path + config['language'] + separador + 'lang.js')\n if config['DebugLevel'] == 0:\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.CRITICAL)\n elif config['DebugLevel'] == 1:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=50 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y %I:%M:%S %p', level=logging.INFO, handlers=[rfh])\n elif config['DebugLevel'] == 2:\n rfh = logging.handlers.RotatingFileHandler(filename=logfile, mode=\n 'a', maxBytes=5 * 1024 * 1024, backupCount=2, encoding=None,\n delay=0)\n logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',\n datefmt='%d/%m/%Y 
%I:%M:%S %p', level=logging.DEBUG, handlers=[rfh]\n )\n emby_wsocket = xnoppo_ws()\n emby_wsocket.ws_config = config\n emby_wsocket.config_file = config_file\n emby_wsocket.ws_lang = lang\n x = threading.Thread(target=thread_function, args=(emby_wsocket,))\n x.start()\n espera = 0\n estado_anterior = ''\n logging.debug('Arrancamos el Servidor Web\\n')\n serverPort = 8090\n webServer = HTTPServer(('', serverPort), MyServer)\n print('Server started http://%s:%s' % ('', serverPort))\n try:\n webServer.serve_forever()\n except KeyboardInterrupt:\n pass\n webServer.server_close()\n logging.info('Fin proceso')\n logging.info('Finished')\n print('Server stopped.')\n",
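Editor's note on this step: its '/send_key?sendkey=...' branch recovers the remote-control key by slicing self.path at the fixed offset len('/send_key?sendkey='). A hedged alternative using standard-library query-string parsing is sketched below; urlparse and parse_qs are real urllib.parse functions, while extract_send_key is a hypothetical helper name introduced only for illustration.

from urllib.parse import urlparse, parse_qs


def extract_send_key(path):
    # Return the value of the 'sendkey' query parameter, or None if absent.
    parsed = urlparse(path)
    if parsed.path != '/send_key':
        return None
    values = parse_qs(parsed.query).get('sendkey', [])
    return values[0] if values else None


# Example: extract_send_key('/send_key?sendkey=PON') returns 'PON'.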
"<import token>\n\n\ndef get_version():\n return '2.01'\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n 
i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador 
+\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n 
self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\ndef move_files(src, dest):\n os.chdir(src)\n src_files = os.listdir('.')\n for file_name in src_files:\n full_file_name = os.path.join(src, file_name)\n if os.path.isfile(full_file_name):\n shutil.copy(full_file_name, dest)\n return 0\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n 
i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador 
+\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n 
self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\ndef get_state():\n status = {}\n status['Version'] = get_version()\n try:\n status['Playstate'] = emby_wsocket.EmbySession.playstate\n status['playedtitle'] = emby_wsocket.EmbySession.playedtitle\n status['server'] = emby_wsocket.EmbySession.server\n status['folder'] = emby_wsocket.EmbySession.folder\n status['filename'] = emby_wsocket.EmbySession.filename\n status['CurrentData'] = emby_wsocket.EmbySession.currentdata\n except:\n status['Playstate'] = 'Not_Connected'\n status['playedtitle'] = ''\n status['server'] = ''\n status['folder'] = ''\n status['filename'] = ''\n status['CurrentData'] = ''\n status['cpu_perc'] = psutil.cpu_percent()\n status['mem_perc'] = psutil.virtual_memory().percent\n print(psutil.virtual_memory().percent)\n print(status)\n return status\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n 
sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n 
servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n 
self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = 
get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n 
save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = 
int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
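cargar_config() in the entry above backfills missing keys one config.get() call per key and then normalises the 'True'/'False' strings stored for the TV and AV flags. The sketch below is an equivalent, table-driven version of that backfill, not the original implementation: apply_defaults and the DEFAULTS table are illustrative names, with the default values copied from the code above.

import json

DEFAULTS = {
    'Autoscript': False,
    'enable_all_libraries': False,
    'TV_model': '',
    'TV_SOURCES': [],
    'AV_model': '',
    'AV_SOURCES': [],
    'TV_script_init': '',
    'TV_script_end': '',
    'av_delay_hdmi': 0,
    'AV_Port': 23,
    'timeout_oppo_mount': 60,
    'language': 'es-ES',
    'default_nfs': False,
    'wait_nfs': False,
    'refresh_time': 5,
    'check_beta': False,
    'smbtrick': False,
    'BRDisc': False,
}


def apply_defaults(config_file):
    # Load config.json and fill in any missing keys with their defaults.
    with open(config_file, 'r') as f:
        config = json.load(f)
    for key, value in DEFAULTS.items():
        config[key] = config.get(key, value)
    # The original also converts the string values 'True'/'False'
    # to real booleans for the TV and AV flags.
    for flag in ('TV', 'AV'):
        if config.get(flag) == 'True':
            config[flag] = True
        elif config.get(flag) == 'False':
            config[flag] = False
    return config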
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\ndef leer_file(web_file):\n with open(web_file, 'r', encoding='utf8') as f:\n num = f.read()\n f.close\n return num\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in 
device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n 
cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == 
'/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n 
self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if 
self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
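Editor's note: the handler steps above repeat one POST pattern for every configuration endpoint (/check_emby, /check_oppo, /test_path, /av_test_hdmi, ...): read Content-Length, parse the JSON body, call a check helper, then answer 200 with the echoed config or 300 with the literal string 'ERROR'. The following is a minimal, self-contained sketch of that pattern only; check_fn and the port are stand-ins of mine, not the real test_emby/test_oppo helpers, and the sketch sends the byte length of the encoded body as Content-Length (the steps above pass len(config), which is the number of dict keys, not the payload size).

# Minimal sketch of the POST pattern used by the Xnoppo handler steps above.
# check_fn is a hypothetical stand-in for the real helpers (test_emby, test_oppo, ...).
import json
from http.server import BaseHTTPRequestHandler, HTTPServer


def check_fn(config):
    # Hypothetical check: succeed when an Oppo_IP key is present.
    return 'OK' if config.get('Oppo_IP') else 'FAILED'


class ConfigCheckHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        length = int(self.headers['Content-Length'])
        config = json.loads(self.rfile.read(length).decode('utf-8'))
        if check_fn(config) == 'OK':
            body = json.dumps(config).encode('utf-8')
            self.send_response(200)
        else:
            body = b'ERROR'
            self.send_response(300)
        self.send_header('Content-Type', 'text/html')
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(body)


if __name__ == '__main__':
    # Port 8090 is illustrative; the real server's port is configured elsewhere.
    HTTPServer(('0.0.0.0', 8090), ConfigCheckHandler).serve_forever()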
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs 
= True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\ndef get_dir_folders(directory):\n os.chdir(directory)\n dirs = os.listdir('.')\n encontrado = False\n list_dir = []\n for x in dirs:\n if os.path.isdir(x):\n list_dir.append(x)\n return list_dir\n\n\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n 
separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n 
self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 
'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + 
separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = 
json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
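Editor's note: the cargar_config body repeated in the next step relies on a single idiom throughout: read each key with config.get(key, default) and write the value back, so older config.json files silently gain every newer field; it also normalizes the string values 'True'/'False' for the TV and AV flags into real booleans. Below is a stripped-down sketch of that default-filling idiom under an assumed, illustrative key set (not the full Xnoppo configuration).

# Sketch of the default-filling idiom used by cargar_config: every missing
# key is read with a fallback and written back so old configs stay valid.
import json

DEFAULTS = {            # illustrative subset, not the full Xnoppo key set
    'Autoscript': False,
    'AV_Port': 23,
    'language': 'es-ES',
    'refresh_time': 5,
}


def fill_defaults(config_file):
    with open(config_file, 'r') as f:
        config = json.load(f)
    for key, default in DEFAULTS.items():
        config[key] = config.get(key, default)
    # Normalize string booleans the way the original does for the TV/AV flags.
    for flag in ('TV', 'AV'):
        if config.get(flag) == 'True':
            config[flag] = True
        elif config.get(flag) == 'False':
            config[flag] = False
    return config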
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\ndef get_mount_path(movie, server_data):\n movie = movie.replace(server_data['Emby_Path'], server_data['Oppo_Path'])\n movie = movie.replace('\\\\\\\\', '\\\\')\n movie = movie.replace('\\\\', '/')\n word = '/'\n inicio = movie.find(word)\n inicio = inicio + 1\n final = movie.find(word, inicio, len(movie))\n servidor = movie[inicio:final]\n ultimo = final + 1\n result = final + 1\n while result > 0:\n ultimo = result + 1\n result = movie.find(word, ultimo, len(movie))\n fichero = movie[ultimo:len(movie)]\n final = final + 1\n ultimo = ultimo - 1\n carpeta = movie[final:ultimo]\n resultado = {}\n resultado['Servidor'] = servidor\n resultado['Carpeta'] = carpeta\n resultado['Fichero'] = fichero\n return resultado\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs 
= True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 
'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == 
'/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 
'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
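Note on the handler code in the step above: every route in do_GET and do_POST repeats the same four-call response sequence (send_response, send_header, end_headers, wfile.write). The following is a minimal, self-contained sketch of that pattern using Python's standard http.server; the helper names (SketchHandler, send_html, send_json) are illustrative and do not appear in the original Xnoppo source.

# Sketch only: factors out the send_response / send_header / end_headers /
# wfile.write sequence that each route in the handlers above repeats inline.
import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class SketchHandler(BaseHTTPRequestHandler):

    def _send(self, body: bytes, content_type: str, status: int = 200):
        # Single place for the four-call response sequence.
        self.send_response(status)
        self.send_header('Content-type', content_type)
        self.end_headers()
        self.wfile.write(body)

    def send_html(self, text: str, status: int = 200):
        self._send(text.encode('utf-8'), 'text/html', status)

    def send_json(self, obj, status: int = 200):
        self._send(json.dumps(obj).encode('utf-8'), 'application/json', status)

    def do_GET(self):
        if self.path == '/get_state':
            self.send_json({'Playstate': 'Not_Connected'})
        else:
            self.send_html('<html><body><p>Request: %s</p></body></html>' % self.path)


if __name__ == '__main__':
    HTTPServer(('0.0.0.0', 8090), SketchHandler).serve_forever()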
"<import token>\n<function token>\n\n\ndef thread_function(ws_object):\n print('Thread: starting')\n ws_object.start()\n print('Thread: finishing')\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 
'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO 
este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 
'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 
'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if 
sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config 
= json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = 
int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
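Note on the POST routes in the step above: each one follows the same read-and-parse sequence (read Content-Length bytes from rfile, decode as UTF-8, json.loads the body, then respond). The original also passes len(config) (the number of keys in the dict) as the Content-Length header, where the byte length of the serialized body is presumably intended. A minimal sketch of the sequence with a correct Content-Length; the route name and class are hypothetical, not part of the original code.

# Sketch of the POST body handling repeated throughout do_POST above.
import json
from http.server import BaseHTTPRequestHandler


class PostSketchHandler(BaseHTTPRequestHandler):

    def do_POST(self):
        if self.path == '/save_config_sketch':
            # Read exactly Content-Length bytes, then parse the JSON payload.
            content_length = int(self.headers['Content-Length'])
            post_data = self.rfile.read(content_length)
            config = json.loads(post_data.decode('utf-8'))

            # Echo the parsed config back, mirroring the pattern in the source,
            # but with the Content-Length of the serialized body.
            body = json.dumps(config).encode('utf-8')
            self.send_response(200)
            self.send_header('Content-Type', 'text/html')
            self.send_header('Content-Length', str(len(body)))
            self.send_header('Access-Control-Allow-Origin', '*')
            self.end_headers()
            self.wfile.write(body)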
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = 
{}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n\n\ndef leer_img(web_file):\n with open(web_file, 'rb') as f:\n num = f.read()\n f.close\n return num\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = 
EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == 
'/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', 
tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador 
+ 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = 
json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
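The serialized code above backfills missing configuration keys with dict.get defaults inside cargar_config before the HTTP handlers use them. A minimal, hypothetical sketch of that defaulting pattern is shown here for reference; the file name and the handful of keys are illustrative assumptions, not the full Xnoppo config schema.

import json

def load_config_with_defaults(config_file):
    # Load the JSON config and backfill optional settings so later code
    # can rely on the keys existing, mirroring the pattern in cargar_config.
    with open(config_file, 'r') as f:
        config = json.load(f)
    config['Autoscript'] = config.get('Autoscript', False)
    config['language'] = config.get('language', 'es-ES')
    config['refresh_time'] = config.get('refresh_time', 5)
    config['check_beta'] = config.get('check_beta', False)
    return config

# Usage (assuming a config.json next to the script):
# cfg = load_config_with_defaults('config.json')
# print(cfg['language'])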
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = 
{}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n\n\ndef test_path(config, server):\n rutas = get_mount_path(server['Emby_Path'] + '/test.mkv', server)\n result2 = test_mount_path(config, rutas['Servidor'], rutas['Carpeta'])\n return result2\n\n\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n 
return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n 
self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 
'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + 
separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n 
self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
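The MyServer handler serialized above answers each GET path by writing headers and a JSON or HTML body. Below is a minimal, hypothetical sketch of that request/response pattern with a single JSON route; the port and the hard-coded payload are assumptions for illustration, and the real handler serves many more paths.

import json
from http.server import BaseHTTPRequestHandler, HTTPServer

class ConfigHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # Serve one JSON endpoint, modeled on the /xnoppo_config route; 404 otherwise.
        if self.path == '/xnoppo_config':
            body = json.dumps({'language': 'es-ES', 'check_beta': False}).encode('utf-8')
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(body)
        else:
            self.send_error(404)

if __name__ == '__main__':
    HTTPServer(('0.0.0.0', 8090), ConfigHandler).serve_forever()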
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = 
{}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 
'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\ndef get_selectableFolders(config):\n EmbySession = EmbyHttp(config)\n MediaFolders = EmbySession.get_emby_selectablefolders()\n servers = []\n for Folder in MediaFolders:\n index = 1\n active = is_library_active(config, Folder['Name'])\n if config['enable_all_libraries'] == True:\n active = True\n if active == True:\n for SubFolder in Folder['SubFolders']:\n server = {}\n server['Id'] = SubFolder['Id']\n if index > 1:\n server['name'] = Folder['Name'] + '(' + str(index) + ')'\n else:\n server['name'] = Folder['Name']\n server['Emby_Path'] = SubFolder['Path']\n server['Oppo_Path'] = '/'\n try:\n serv_list = config['servers']\n except:\n serv_list = {}\n for serv in serv_list:\n if server['Emby_Path'] == serv['Emby_Path']:\n server['name'] = serv['name']\n server['Oppo_Path'] = serv['Oppo_Path']\n server['Test_OK'] = serv['Test_OK']\n servers.append(server)\n index = index + 1\n config['servers'] = servers\n\n\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n 
self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = 
cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n 
separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 
'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = 
{}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\ndef test_emby(config):\n try:\n EmbySession = EmbyHttp(config)\n user_info = EmbySession.user_info\n if user_info['SessionInfo']['Id'] != '':\n return 'OK'\n else:\n return 'FAILED'\n except:\n return 'FAILED'\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 
'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n 
self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n 
return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = 
int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n 
self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\ndef check_version(config):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n print(config['check_beta'])\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n xno_version = get_version()\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n print(xno_version)\n print(last_version)\n if xno_version < last_version:\n resp['new_version'] = True\n else:\n resp['new_version'] = False\n print(resp)\n return resp\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = 
{}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\n<function token>\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in 
views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n 
self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 
'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], 
lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
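The do_GET/do_POST handlers in the entry above repeat the same send_response / send_header / end_headers / wfile.write sequence for every route, and they pass len(config) (the number of dict keys) as Content-Length rather than the byte length of the encoded body. The following is a minimal, self-contained sketch of how that boilerplate could be collapsed into one helper; it is not part of the dataset entry, and the names JsonHandler and _reply are hypothetical.

import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class JsonHandler(BaseHTTPRequestHandler):

    def _reply(self, payload, status=200, content_type='application/json'):
        # Serialize once, compute Content-Length from the encoded bytes,
        # and write the body in a single place.
        body = json.dumps(payload).encode('utf-8')
        self.send_response(status)
        self.send_header('Content-Type', content_type)
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self):
        # Hypothetical route used only to exercise the helper.
        if self.path == '/ping':
            self._reply({'status': 'ok'})
        else:
            self._reply({'error': 'not found'}, status=404)


if __name__ == '__main__':
    # Serve on port 8080 until interrupted (illustrative default, not taken
    # from the entry above).
    HTTPServer(('0.0.0.0', 8080), JsonHandler).serve_forever()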
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 
'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_mount_path(config, servidor, carpeta):\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n while response_data6f.find('devicelist\":[]') > 0:\n time.sleep(1)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('QPW', config)\n device_list = json.loads(response_data6f)\n if config['DebugLevel'] > 0:\n print(device_list)\n nfs = config['default_nfs']\n for device in device_list['devicelist']:\n if device['name'].upper() == servidor.upper():\n if device['sub_type'] == 'nfs':\n nfs = True\n break\n else:\n nfs = False\n break\n if nfs:\n response_login = LoginNFS(config, servidor)\n else:\n response_login = LoginSambaWithOutID(config, servidor)\n if config['Always_ON'] == False:\n time.sleep(5)\n response_data6b = getsetupmenu(config)\n if nfs:\n response_mount = mountSharedNFSFolder(servidor, carpeta, '', '',\n config)\n else:\n response_mount = mountSharedFolder(servidor, carpeta, '', '',\n config)\n response = json.loads(response_mount)\n if config['Autoscript'] == True:\n result = umountSharedFolder(config)\n if response['success'] == True:\n a = 'OK'\n else:\n a = 'FAILURE'\n return a\n else:\n print(\n 'No se puede conectar, revisa las configuraciones o que el OPPO este encendido o en reposo'\n )\n\n\n<function token>\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n 
try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n 
self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = 
getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n 
content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n 
self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
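The cargar_config function in the entry above fills in missing settings with repeated pairs of the form `default = config.get(key, fallback); config[key] = default`. As a sketch only, the same behaviour can be expressed as one loop over a defaults table; the names load_config and DEFAULTS are hypothetical, and DEFAULTS lists just a few of the keys from the entry for illustration.

import json

DEFAULTS = {
    'Autoscript': False,
    'enable_all_libraries': False,
    'TV_model': '',
    'AV_Port': 23,
    'language': 'es-ES',
    'refresh_time': 5,
    'BRDisc': False,
}


def load_config(path):
    # Read the JSON file and fill in any missing keys from DEFAULTS.
    # dict.setdefault keeps an existing value and only writes the fallback
    # when the key is absent, matching the get-then-assign pattern above.
    with open(path, 'r') as f:
        config = json.load(f)
    for key, value in DEFAULTS.items():
        config.setdefault(key, value)
    # Normalise string booleans ('True'/'False') the same way the original does.
    for key in ('TV', 'AV'):
        if config.get(key) == 'True':
            config[key] = True
        elif config.get(key) == 'False':
            config[key] = False
    return config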
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 
'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_oppo(config):\n result = check_socket(config)\n if result == 0:\n return 'OK'\n else:\n return 'FAILED'\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n 
self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = 
cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n 
separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n 
self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 
'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef restart():\n print('restart')\n try:\n emby_wsocket.stop()\n except:\n sys.exit()\n sys.exit()\n print('fin restart')\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 
'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 
'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = 
update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path 
= cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n 
return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n 
else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + 
separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_devices(config):\n try:\n EmbySession = EmbyHttp(config)\n devices = EmbySession.get_emby_devices()\n index = 0\n dev_temp = []\n for device in devices['Items']:\n try:\n if device['Id'] != 'Xnoppo':\n device['Name'] = device['Name'] + ' / ' + device['AppName']\n device['Id'] = device['ReportedDeviceId']\n dev_temp.append(device)\n except:\n pass\n config['devices'] = dev_temp\n return 'OK'\n except:\n return 'FAILURE'\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n 
self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if 
self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n 
print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = 
self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + 
separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\ndef cargar_lang(config_file):\n with open(config_file.encode(sys.getfilesystemencoding()), 'r',\n encoding='latin-1') as f:\n config = json.load(f)\n f.close\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == 
'/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == 
'/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + 
separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = 
json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef save_config(config_file, config):\n with open(config_file, 'w') as fw:\n json.dump(config, fw, indent=4)\n fw.close\n try:\n emby_wsocket.ws_config = config\n emby_wsocket.EmbySession.config = config\n except:\n emby_wsocket.ws_config = config\n\n\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + 
separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path + config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n 
self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', 
tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == 
'/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n 
return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n\n\ndef update_version(config, vers_path, cwd):\n url = (\n 'https://raw.githubusercontent.com/siberian-git/Xnoppo/main/versions/version.js'\n )\n headers = {}\n response = requests.get(url, headers=headers)\n version = json.loads(response.text)\n print(version)\n if config['check_beta'] == True:\n last_version = version['beta_version']\n last_version_file = version['beta_version_file']\n else:\n last_version = version['curr_version']\n last_version_file = version['curr_version_file']\n url2 = ('https://github.com/siberian-git/Xnoppo/raw/main/versions/' +\n last_version_file)\n headers = {}\n response2 = requests.get(url2, headers=headers)\n filename = vers_path + last_version_file\n with open(filename, 'wb') as f:\n f.write(response2.content)\n f.close()\n shutil.unpack_archive(filename, cwd)\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n if config['TV'] == True and config['TV_model'] != '':\n move_files(tv_path + config['TV_model'], lib_path)\n if config['AV'] == True and config['AV_model'] != '':\n move_files(av_path 
+ config['AV_model'], lib_path)\n resp = {}\n resp['version'] = last_version\n resp['file'] = last_version_file\n resp['new_version'] = False\n return resp\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 
'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n 
config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', 
config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 
'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef cargar_config(config_file, tv_path, av_path, lang_path):\n with open(config_file, 'r') as f:\n config = json.load(f)\n f.close\n config['Version'] = get_version()\n default = config.get('Autoscript', False)\n config['Autoscript'] = default\n default = config.get('enable_all_libraries', False)\n config['enable_all_libraries'] = default\n default = config.get('TV_model', '')\n config['TV_model'] = default\n default = config.get('TV_SOURCES', [])\n config['TV_SOURCES'] = default\n default = config.get('AV_model', '')\n config['AV_model'] = default\n default = config.get('AV_SOURCES', [])\n config['AV_SOURCES'] = default\n default = config.get('TV_script_init', '')\n config['TV_script_init'] = default\n default = config.get('TV_script_end', '')\n config['TV_script_end'] = default\n default = config.get('av_delay_hdmi', 0)\n config['av_delay_hdmi'] = default\n default = config.get('AV_Port', 23)\n config['AV_Port'] = default\n default = config.get('timeout_oppo_mount', 60)\n config['timeout_oppo_mount'] = default\n default = config.get('language', 'es-ES')\n config['language'] = default\n default = config.get('default_nfs', False)\n config['default_nfs'] = default\n default = config.get('wait_nfs', False)\n config['wait_nfs'] = default\n default = config.get('refresh_time', 5)\n config['refresh_time'] = default\n default = config.get('check_beta', False)\n config['check_beta'] = default\n default = config.get('smbtrick', False)\n config['smbtrick'] = default\n default = config.get('BRDisc', False)\n config['BRDisc'] = default\n edit_server = 0\n server_list = config['servers']\n for server in server_list:\n default = server.get('Test_OK', False)\n server_list[edit_server]['Test_OK'] = default\n edit_server = edit_server + 1\n if config['TV'] == 'True':\n config['TV'] = True\n if config['TV'] == 'False':\n config['TV'] = False\n if config['AV'] == 'True':\n config['AV'] = True\n if config['AV'] == 'False':\n config['AV'] = False\n config['servers'] = server_list\n config['tv_dirs'] = get_dir_folders(tv_path)\n config['av_dirs'] = get_dir_folders(av_path)\n config['langs'] = get_dir_folders(lang_path)\n return config\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + 
separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n 
self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 
'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n 
self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = 
json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = 
av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
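A minimal sketch, not part of the dataset row above, of how the send_response / send_header / end_headers / wfile.write boilerplate repeated throughout the MyServer do_GET and do_POST handlers could be factored into a single helper. The handler name JsonHandler and the helper _write_json are hypothetical; only standard-library calls are used, and encoding the body once keeps Content-Length consistent with the bytes actually written.

import json
from http.server import BaseHTTPRequestHandler, HTTPServer


class JsonHandler(BaseHTTPRequestHandler):
    def _write_json(self, payload, status=200):
        # Encode once so the Content-Length header matches the bytes sent.
        body = json.dumps(payload).encode('utf-8')
        self.send_response(status)
        self.send_header('Content-Type', 'application/json')
        self.send_header('Content-Length', str(len(body)))
        self.send_header('Access-Control-Allow-Origin', '*')
        self.end_headers()
        self.wfile.write(body)

    def do_GET(self):
        # Dispatch on the request path, mirroring the structure of do_GET above.
        if self.path == '/get_state':
            self._write_json({'Playstate': 'Not_Connected'})
        else:
            self._write_json({'error': 'unknown path'}, status=404)


if __name__ == '__main__':
    HTTPServer(('0.0.0.0', 8090), JsonHandler).serve_forever()

Each endpoint then reduces to one call per branch instead of six header lines, which is the main source of length in the handlers shown above.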
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\ndef is_library_active(config, libraryname):\n for library in config['Libraries']:\n if library['Name'] == libraryname:\n return library['Active']\n return False\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 
'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n 
config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', 
config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 
'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n 
self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
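A small illustrative sketch of one detail visible in the POST branches above: the handlers pass len(config) to the Content-Length header, but config is a decoded dict at that point, so len() counts its keys rather than the bytes that will be written. Encoding first gives the length of the actual body. The values below are invented for demonstration only.

import json

config = {'Oppo_IP': '192.168.1.50', 'BRDisc': False}

wrong_length = len(config)                  # 2 -> number of dict keys
body = json.dumps(config).encode('utf-8')
right_length = len(body)                    # byte length of the response body

assert wrong_length != right_length
print(wrong_length, right_length)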
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef carga_libraries(config):\n try:\n EmbySession = EmbyHttp(config)\n views_list = EmbySession.get_user_views(EmbySession.user_info[\n 'User']['Id'])\n libraries = []\n for view in views_list:\n library = {}\n library['Name'] = view['Name']\n library['Id'] = view['Id']\n library['Active'] = False\n try:\n lib_list = config['Libraries']\n except:\n lib_list = {}\n for lib in lib_list:\n if lib['Id'] == view['Id']:\n library['Active'] = lib['Active']\n libraries.append(library)\n config['Libraries'] = libraries\n return 0\n except:\n return 1\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n 
self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + 
config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n 
self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path + config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
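A minimal sketch of the path setup repeated at the top of do_GET and do_POST above: instead of branching on sys.platform to choose a separator and concatenating strings, os.path.join builds the same directories portably. The folder names mirror the ones used in the handlers ('web', 'resources', 'libraries', 'lang', 'lib', 'versions'); the trailing-separator convention of the original is unnecessary because join is used again when appending a filename.

import os

cwd = os.path.dirname(os.path.abspath(__file__))

html_path = os.path.join(cwd, 'web')
resource_path = os.path.join(html_path, 'resources')
tv_path = os.path.join(html_path, 'libraries', 'TV')
av_path = os.path.join(html_path, 'libraries', 'AV')
lang_path = os.path.join(html_path, 'lang')
lib_path = os.path.join(cwd, 'lib')
vers_path = os.path.join(cwd, 'versions')

config_file = os.path.join(cwd, 'config.json')
print(config_file)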
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n 
self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = 
getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n\n def do_POST(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lib_path = cwd + separador + 'lib' + separador\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/save_config':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n if self.path == '/check_emby':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_emby(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n status = get_state()\n if status['Playstate'] == 'Not_Connected':\n save_config(cwd + separador + 'config.json', config)\n emby_wsocket.ws_config = config\n restart()\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/check_oppo':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = test_oppo(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/test_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n server = json.loads(post_data.decode('utf-8'))\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = test_path(config, server)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(server))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(server), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/navigate_path':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n path_obj = json.loads(post_data.decode('utf-8'))\n path = path_obj['path']\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = navigate_folder(path, config)\n a_json = json.dumps(a)\n print(len(a_json))\n self.send_response(200)\n self.send_header('Content-Length', len(a_json))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/move_tv':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(tv_path + config['TV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/move_av':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n save_config(cwd + separador + 'config.json', config)\n move_files(av_path 
+ config['AV_model'], lib_path)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n restart()\n return 0\n if self.path == '/get_tv_key':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_key(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_conn':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_test_conn(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_tv_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_tv_sources(config)\n if a == 'OK':\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/get_av_sources':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = get_hdmi_list(config)\n if a != None:\n config['AV_SOURCES'] = a\n save_config(cwd + separador + 'config.json', config)\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n 
self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_init':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/tv_test_end':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = tv_set_prev(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_on':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_check_power(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_off':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_power_off(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n 
self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n if self.path == '/av_test_hdmi':\n content_length = int(self.headers['Content-Length'])\n post_data = self.rfile.read(content_length)\n config = json.loads(post_data.decode('utf-8'))\n a = av_change_hdmi(config)\n if a == 'OK':\n self.send_response(200)\n self.send_header('Content-Length', len(config))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes(json.dumps(config), 'utf-8'))\n else:\n self.send_response(300)\n self.send_header('Content-Length', len('ERROR'))\n self.send_header('Content-Type', 'text/html')\n self.send_header('Access-Control-Allow-Credentials', 'true')\n self.send_header('Access-Control-Allow-Origin', '*')\n self.end_headers()\n self.wfile.write(bytes('ERROR', 'utf-8'))\n return 0\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n\n def do_GET(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n if sys.platform.startswith('win'):\n separador = '\\\\'\n else:\n separador = '/'\n resource_path = (cwd + separador + 'web' + separador + 'resources' +\n separador)\n html_path = cwd + separador + 'web' + separador\n tv_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'TV' + separador)\n av_path = (cwd + separador + 'web' + separador + 'libraries' +\n separador + 'AV' + separador)\n lang_path = cwd + separador + 'web' + separador + 'lang' + separador\n vers_path = cwd + separador + 'versions' + separador\n print(self.path)\n if self.path == '/emby_conf.html':\n i = leer_file(html_path + 'emby_conf.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/oppo_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'oppo_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/lib_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'lib_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/path_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'path_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/tv_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'tv_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/av_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'av_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/other_conf.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'other_conf.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/status.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'status.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/help.html':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_file(html_path + 'help.html')\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/remote.html':\n i = leer_file(html_path + 'remote.html')\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(i, 'utf-8'))\n return 0\n if self.path == '/android-chrome-36x36.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'android-chrome-36x36.png')\n 
self.wfile.write(bytes(i))\n return 0\n if self.path == '/av-receiver-icon-2.jpg':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'av-receiver-icon-2.jpg')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/dragon.png':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n i = leer_img(resource_path + 'dragon.png')\n self.wfile.write(bytes(i))\n return 0\n if self.path == '/xnoppo_config':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_lib':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n carga_libraries(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/xnoppo_config_dev':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_devices(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/check_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = check_version(config)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/update_version':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = update_version(config, vers_path, cwd)\n restart()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/get_state':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = get_state()\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/restart':\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n a = 'Restarting'\n self.wfile.write(bytes(a, 'utf-8'))\n restart()\n if self.path == '/refresh_paths':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n a = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n get_selectableFolders(a)\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path == '/lang':\n self.send_response(200)\n self.send_header('Content-type', 'application/json')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = cargar_lang(lang_path + config['language'] + separador +\n 'lang.js')\n self.wfile.write(bytes(json.dumps(a), 'utf-8'))\n return 0\n if self.path.find('/send_key?') >= 0:\n get_data = self.path\n print(get_data)\n a = len('/send_key?sendkey=')\n b = get_data[a:len(get_data)]\n print(b)\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n sendnotifyremote(config['Oppo_IP'])\n result = check_socket(config)\n if b == 'PON':\n if result == 0:\n response_data6a = getmainfirmwareversion(config)\n response_data6c = 
getdevicelist(config)\n response_data6b = getsetupmenu(config)\n response_data6c = OppoSignin(config)\n response_data6d = getdevicelist(config)\n response_data6e = getglobalinfo(config)\n response_data6f = getdevicelist(config)\n response_data_on = sendremotekey('EJT', config)\n if config['BRDisc'] == True:\n time.sleep(1)\n response_data_on = sendremotekey('EJT', config)\n time.sleep(1)\n response_data6b = getsetupmenu(config)\n else:\n response_data_on = sendremotekey(b, config)\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n a = 'ok'\n self.wfile.write(bytes(a, 'utf-8'))\n return 0\n if self.path == '/log.txt':\n self.send_response(200)\n self.send_header('Content-type', 'text')\n self.end_headers()\n config = cargar_config(cwd + separador + 'config.json', tv_path,\n av_path, lang_path)\n a = leer_img(cwd + separador + 'emby_xnoppo_client_logging.log')\n self.wfile.write(bytes(a))\n return 0\n else:\n self.send_response(200)\n self.send_header('Content-type', 'text/html')\n self.end_headers()\n self.wfile.write(bytes(\n '<html><head><title>https://pythonbasics.org</title></head>',\n 'utf-8'))\n self.wfile.write(bytes('<p>Request: %s</p>' % self.path, 'utf-8'))\n self.wfile.write(bytes('<body>', 'utf-8'))\n self.wfile.write(bytes('<p>This is an example web server.</p>',\n 'utf-8'))\n self.wfile.write(bytes('</body></html>', 'utf-8'))\n <function token>\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass MyServer(BaseHTTPRequestHandler):\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n<code token>\n"
] | false |
505 |
33cc8814d9397bcb0041728407efef80a136f151
|
#!/usr/bin/env python
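# Command-line PaperCut client: logs in with a stored or prompted password,
# lists available printers, shows the account balance, and submits print
# jobs while polling their status until completion.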
import argparse
import keyring
import papercut
import ConfigParser
import getpass
import time
import os
config = ConfigParser.ConfigParser()
config.read([os.path.expanduser('~/.papercut')])
try:
username = config.get('papercut','username')
except ConfigParser.NoSectionError:
username = None
p = argparse.ArgumentParser(description='Print some documents')
p.add_argument('--print', '-p', help='a filename to be printed', dest='printjob')
p.add_argument('--printer', '-r', help='a printer name to print to')
p.add_argument('--balance', '-b', nargs='?', const=True, help='display the user\'s printing balance')
p.add_argument('--list', '-l', nargs='?', const=True, help='list available printers')
p.add_argument('--user', '-u', help='username')
p.add_argument('--password-options', '-o', choices=['save','prompt'], help='save: prompt for password and save to keyring,\n prompt: prompt for password')
args = p.parse_args()
if args.user:
    username = args.user
if not username:
    username = raw_input('enter username: ')
password = keyring.get_password('papercut', username)
def list_printers(sessID):
printers = papercut.listPrinters(sessID)
print "\nAvailable printers:"
for i,printer in enumerate(printers):
print i,"\t",printer[1], "." * (50 - len(printer[1])), printer[0]
return printers
def get_balance(sessID):
print '\nYour balance is now: $ %.2f' % (int(papercut.getBalance(sessID)) / 100.0)
if args.password_options or not password:
password = getpass.getpass()
if args.password_options == 'save':
keyring.set_password('papercut', username, password)
print "password saved in keyring"
if args.list or args.balance or args.printjob or args.printer:
sessID = papercut.login(username, password)
if sessID:
print '\nLogged in to PaperCut with session ID',sessID
if args.list: list_printers(sessID)
if args.balance: get_balance(sessID)
if args.printjob:
if not args.printer:
printers = list_printers(sessID)
args.printer = raw_input('select printer: ')
try:
printerIndex = int(args.printer)
args.printer = printers[printerIndex][1]
except ValueError:
pass
printJobID = papercut.printFile(args.printjob, args.printer, sessID)
print '\nJob sent to printer', args.printer
status = papercut.getPrintStatus(printJobID)
while(status['status'] == 'Submitting'):
time.sleep(0.1)
status = papercut.getPrintStatus(printJobID)
print "\nJob queued for printing."
while(not status['complete']):
time.sleep(0.1)
status = papercut.getPrintStatus(printJobID)
print "\nComplete!"
print "\nThis job cost $", status['cost']
# print status
get_balance(sessID)
else:
print '\nDid not successfully log in to PaperCut'
|
[
"#!/usr/bin/env python\nimport argparse\nimport keyring\nimport papercut\nimport ConfigParser\nimport getpass\nimport time\nimport os\n\nconfig = ConfigParser.ConfigParser()\nconfig.read([os.path.expanduser('~/.papercut')])\ntry:\n username = config.get('papercut','username')\nexcept ConfigParser.NoSectionError:\n username = None\n\np = argparse.ArgumentParser(description='Print some documents')\np.add_argument('--print', '-p', help='a filename to be printed', dest='printjob')\np.add_argument('--printer', '-r', help='a printer name to print to')\np.add_argument('--balance', '-b', nargs='?', const=True, help='display the user\\' printing balance')\np.add_argument('--list', '-l', nargs='?', const=True, help='list available printers')\np.add_argument('--user', '-u', help='username')\np.add_argument('--password-options', '-o', choices=['save','prompt'], help='save: prompt for password and save to keyring,\\n prompt: prompt for password')\n\nargs = p.parse_args()\n\nif not username and not args.user:\n username = raw_input('enter username: ')\n\npassword = keyring.get_password('papercut', username)\n\ndef list_printers(sessID):\n printers = papercut.listPrinters(sessID)\n print \"\\nAvailable printers:\"\n for i,printer in enumerate(printers):\n print i,\"\\t\",printer[1], \".\" * (50 - len(printer[1])), printer[0]\n return printers\n\ndef get_balance(sessID):\n print '\\nYour balance is now: $ %.2f' % (int(papercut.getBalance(sessID)) / 100.0)\n\n\n\nif args.password_options or not password:\n password = getpass.getpass()\n \nif args.password_options == 'save':\n keyring.set_password('papercut', username, password)\n print \"password saved in keyring\"\n\nif args.list or args.balance or args.printjob or args.printer:\n sessID = papercut.login(username, password)\n if sessID:\n print '\\nLogged in to PaperCut with session ID',sessID\n if args.list: list_printers(sessID)\n if args.balance: get_balance(sessID)\n if args.printjob:\n if not args.printer:\n printers = list_printers(sessID)\n args.printer = raw_input('select printer: ')\n try:\n printerIndex = int(args.printer)\n args.printer = printers[printerIndex][1]\n except ValueError:\n pass\n printJobID = papercut.printFile(args.printjob, args.printer, sessID)\n print '\\nJob sent to printer', args.printer\n \n status = papercut.getPrintStatus(printJobID)\n while(status['status'] == 'Submitting'):\n time.sleep(0.1)\n status = papercut.getPrintStatus(printJobID)\n print \"\\nJob queued for printing.\"\n\n while(not status['complete']):\n time.sleep(0.1)\n status = papercut.getPrintStatus(printJobID)\n print \"\\nComplete!\"\n print \"\\nThis job cost $\", status['cost']\n# print status\n get_balance(sessID)\n \n else:\n print '\\nDid not successfully log in to PaperCut'\n \n\n\n \n"
] | true |
506 |
5dc8f420e16ee14ecfdc61413f10a783e819ec32
|
import sys
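# Rank each item by bulk: an item's rank is 1 + the number of items that are
# strictly larger in both dimensions (see is_huge below).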
def is_huge(A, B):
return (A[0] > B[0]) and (A[1] > B[1])
if __name__ == '__main__':
bulks = []
num = int(sys.stdin.readline())
for i in range(num):
bulks.append(list(map(int, sys.stdin.readline().split())))
for i in range(len(bulks)):
count = 0
for j in range(len(bulks)):
if bulks[i] != bulks[j] and is_huge(bulks[j], bulks[i]):
count += 1
if count == 0:
print(1, end=" ")
else:
print(count+1, end=" ")
|
[
"import sys\n\n\ndef is_huge(A, B):\n return (A[0] > B[0]) and (A[1] > B[1])\n\n\nif __name__ == '__main__':\n bulks = []\n num = int(sys.stdin.readline())\n for i in range(num):\n bulks.append(list(map(int, sys.stdin.readline().split())))\n\n for i in range(len(bulks)):\n count = 0\n for j in range(len(bulks)):\n if bulks[i] != bulks[j] and is_huge(bulks[j], bulks[i]):\n count += 1\n\n if count == 0:\n print(1, end=\" \")\n else:\n print(count+1, end=\" \")\n",
"import sys\n\n\ndef is_huge(A, B):\n return A[0] > B[0] and A[1] > B[1]\n\n\nif __name__ == '__main__':\n bulks = []\n num = int(sys.stdin.readline())\n for i in range(num):\n bulks.append(list(map(int, sys.stdin.readline().split())))\n for i in range(len(bulks)):\n count = 0\n for j in range(len(bulks)):\n if bulks[i] != bulks[j] and is_huge(bulks[j], bulks[i]):\n count += 1\n if count == 0:\n print(1, end=' ')\n else:\n print(count + 1, end=' ')\n",
"<import token>\n\n\ndef is_huge(A, B):\n return A[0] > B[0] and A[1] > B[1]\n\n\nif __name__ == '__main__':\n bulks = []\n num = int(sys.stdin.readline())\n for i in range(num):\n bulks.append(list(map(int, sys.stdin.readline().split())))\n for i in range(len(bulks)):\n count = 0\n for j in range(len(bulks)):\n if bulks[i] != bulks[j] and is_huge(bulks[j], bulks[i]):\n count += 1\n if count == 0:\n print(1, end=' ')\n else:\n print(count + 1, end=' ')\n",
"<import token>\n\n\ndef is_huge(A, B):\n return A[0] > B[0] and A[1] > B[1]\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
507 |
d6a677ed537f6493bb43bd893f3096dc058e27da
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements command to describe a given guest policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute.os_config import osconfig_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.compute.os_config import resource_args
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class Describe(base.DescribeCommand):
"""Describe the given guest policy.
## EXAMPLES
To describe the guest policy 'policy1' in the project 'project1', run:
$ {command} policy1 --project=project1
To describe the guest policy 'policy1' in the organization '12345', run:
$ {command} policy1 --organization=12345
"""
@staticmethod
def Args(parser):
resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')
def Run(self, args):
guest_policy_ref = args.CONCEPTS.guest_policy.Parse()
release_track = self.ReleaseTrack()
client = osconfig_utils.GetClientInstance(release_track)
messages = osconfig_utils.GetClientMessages(release_track)
guest_policy_type = guest_policy_ref.type_
guest_policy_name = guest_policy_ref.result.RelativeName()
if guest_policy_type == type(guest_policy_type).organization_guest_policy:
request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.organizations_guestPolicies
elif guest_policy_type == type(guest_policy_type).folder_guest_policy:
request = messages.OsconfigFoldersGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.folders_guestPolicies
else:
request = messages.OsconfigProjectsGuestPoliciesGetRequest(
name=guest_policy_name)
service = client.projects_guestPolicies
return service.Get(request)
|
[
"# -*- coding: utf-8 -*- #\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Implements command to describe a given guest policy.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nfrom googlecloudsdk.api_lib.compute.os_config import osconfig_utils\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.os_config import resource_args\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n\n if guest_policy_type == type(guest_policy_type).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.projects_guestPolicies\n\n return service.Get(request)\n",
"<docstring token>\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom googlecloudsdk.api_lib.compute.os_config import osconfig_utils\nfrom googlecloudsdk.calliope import base\nfrom googlecloudsdk.command_lib.compute.os_config import resource_args\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"<docstring token>\n<import token>\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n \"\"\"Describe the given guest policy.\n\n ## EXAMPLES\n\n To describe the guest policy 'policy1' in the project 'project1', run:\n\n $ {command} policy1 --project=project1\n\n To describe the guest policy 'policy1' in the organization '12345', run:\n\n $ {command} policy1 --organization=12345\n\n \"\"\"\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"<docstring token>\n<import token>\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n <docstring token>\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n\n def Run(self, args):\n guest_policy_ref = args.CONCEPTS.guest_policy.Parse()\n release_track = self.ReleaseTrack()\n client = osconfig_utils.GetClientInstance(release_track)\n messages = osconfig_utils.GetClientMessages(release_track)\n guest_policy_type = guest_policy_ref.type_\n guest_policy_name = guest_policy_ref.result.RelativeName()\n if guest_policy_type == type(guest_policy_type\n ).organization_guest_policy:\n request = messages.OsconfigOrganizationsGuestPoliciesGetRequest(\n name=guest_policy_name)\n service = client.organizations_guestPolicies\n elif guest_policy_type == type(guest_policy_type).folder_guest_policy:\n request = messages.OsconfigFoldersGuestPoliciesGetRequest(name=\n guest_policy_name)\n service = client.folders_guestPolicies\n else:\n request = messages.OsconfigProjectsGuestPoliciesGetRequest(name\n =guest_policy_name)\n service = client.projects_guestPolicies\n return service.Get(request)\n",
"<docstring token>\n<import token>\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n <docstring token>\n\n @staticmethod\n def Args(parser):\n resource_args.AddGuestPolicyResourceArg(parser, 'to describe.')\n <function token>\n",
"<docstring token>\n<import token>\n\n\[email protected](base.ReleaseTrack.ALPHA)\nclass Describe(base.DescribeCommand):\n <docstring token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
508 |
e6acc7b022001d8419095ad6364a6ae9504ec7aa
|
from __future__ import annotations
from functools import cache
class Solution:
def countArrangement(self, n: int) -> int:
cache = {}
def helper(perm):
digits = len(perm)
if digits == 1:
return 1
if perm in cache:
return cache[perm]
cnt = 0
for i in range(digits):
if perm[i] % digits == 0 or digits % perm[i] == 0:
cnt += helper(perm[:i] + perm[i+1:])
cache[perm] = cnt
return cnt
return helper(tuple(range(1, n+1)))
class Solution:
def countArrangement(self, n: int) -> int:
# total number of bitset states possible
bitset_total = 2**n
dp = [[0 for _ in range(bitset_total)]
for _ in range(n+1)]
# all other valid states lead to this base case so mark this as 1
dp[0][0] = 1
# iterate over all positions
for i in range(1, n+1):
# iterate over all subsets
for bm in range(bitset_total):
# iterate over all numbers
for num in range(n):
                    # if the number is included in the current subset and satisfies the divisibility condition
                    # & (bitwise AND: a bit is 1 only when both corresponding bits are 1)
                    # 1 << x (shift 1 left by x places, filling the right with zeros)
                    # ^ (XOR: a bit is 1 when exactly one of the corresponding bits is 1)
if ((bm & (1 << num)) and
(((num+1) % i == 0) or
(i % (num+1) == 0))):
dp[i][bm] += dp[i-1][bm ^ (1 << num)]
return dp[-1][-1]
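# Illustrative aside (added for clarity, not part of the original submission):
# concrete values for the bit tricks used above, assuming a 3-bit mask (n = 3).
bm_example = 0b101                        # numbers 1 and 3 are in the subset
num_example = 2                           # zero-based index of number 3
print(bm_example & (1 << num_example))    # 4 (non-zero): number 3 is in the mask
print(bm_example ^ (1 << num_example))    # 1 (0b001): the mask with number 3 removed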
# bm is a binary mask of the numbers already used.
# i is the current place we want to fill.
# The idea is to start from the end and fill places in the opposite direction,
# because for the larger places we potentially have fewer candidates.
# How dfs(bm, i) works:
# If we reached place 0 and the process was not interrupted so far,
# it means that we have found a beautiful arrangement.
# For each number 1, 2, ..., n we try to put this number on place i,
# and we need to check two conditions: first, that this place is still empty,
# using the bitmask, and second, that one of the two divisibility properties of a
# beautiful arrangement holds. In this case we add dfs(bm ^ 1 << num, i - 1) to the final answer.
# Finally, we run dfs(0, n): from the last place and with an empty bitmask.
class Solution:
def countArrangement(self, n: int) -> int:
@cache
def dfs(bm, i):
if i == 0:
return 1
cnt = 0
for num in range(n):
if not bm & 1 << num\
and ((num+1) % i == 0 or i % (num+1) == 0):
cnt += dfs(bm ^ 1 << num, i-1)
return cnt
return dfs(0, n)
# nums is the set of still available numbers.
# Note that i goes downwards, from n to 1, because position i = 1
# can hold any number, so I don't even have to check whether the last
# remaining number fits there. Also, position i = 2 happily holds
# every second number and i = 3 happily holds every third number,
# so filling the lowest positions last has a relatively high chance of success.
class Solution:
def countArrangement(self, n: int) -> int:
def count(i, nums):
if i == 1:
return 1
return sum(count(i-1, nums-{num})
for num in nums
if num % i == 0 or i % num == 0)
return count(n, set(range(1, n+1)))
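# Minimal sanity check (added for illustration, not part of the original
# submission); the expected answers for n = 1 and n = 2 can be verified by hand.
if __name__ == '__main__':
    solver = Solution()                  # the most recently defined Solution
    assert solver.countArrangement(1) == 1
    assert solver.countArrangement(2) == 2
    print('sanity checks passed')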
|
[
"from __future__ import annotations\nfrom functools import cache\n\n\nclass Solution:\n def countArrangement(self, n: int) -> int:\n cache = {}\n\n def helper(perm):\n digits = len(perm)\n if digits == 1:\n return 1\n if perm in cache:\n return cache[perm]\n cnt = 0\n for i in range(digits):\n if perm[i] % digits == 0 or digits % perm[i] == 0:\n cnt += helper(perm[:i] + perm[i+1:])\n cache[perm] = cnt\n return cnt\n\n return helper(tuple(range(1, n+1)))\n\n\nclass Solution:\n def countArrangement(self, n: int) -> int:\n # total number of bitset states possible\n bitset_total = 2**n\n dp = [[0 for _ in range(bitset_total)]\n for _ in range(n+1)]\n # all other valid states lead to this base case so mark this as 1\n dp[0][0] = 1\n # iterate over all positions\n for i in range(1, n+1):\n # iterate over all subsets\n for bm in range(bitset_total):\n # iterate over all numbers\n for num in range(n):\n # if number is not visited and satisfies condition in question\n # & (各桁が両方とも1なら1になる)\n # 1 << x (1を左にxシフトさせて右をゼロで埋める)\n # ^ (XOR: 各桁の片方が1なら1になる)\n if ((bm & (1 << num)) and\n (((num+1) % i == 0) or\n (i % (num+1) == 0))):\n dp[i][bm] += dp[i-1][bm ^ (1 << num)]\n return dp[-1][-1]\n\n\n# bm is binary mask for visited numbers.\n# i is current place we want to fill. \n# Idea is to start from the end, and fill places in opposite direction,\n# because for big numbers we potentially have less candidates.\n# how dfs(bm, pl) will work:\n# If we reached place 0 and procces was not interrupted so far,\n# it means that we find beautiful arrangement.\n# For each number 1, 2, ..., n we try to put this number on place pl:\n# and we need to check two conditions: first, that this place is still empty,\n# using bitmask and secondly that one of the two properties for beutiful arrangement\n# holds. In this case we add dfs(bm^1<<i, pl - 1) to final answer.\n# Finally, we run dfs(0, n): from the last place and with empty bit-mask.\nclass Solution:\n def countArrangement(self, n: int) -> int:\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n\n cnt = 0\n for num in range(n):\n if not bm & 1 << num\\\n and ((num+1) % i == 0 or i % (num+1) == 0):\n cnt += dfs(bm ^ 1 << num, i-1)\n return cnt\n\n return dfs(0, n)\n\n\n# nums is the set of still available numbers.\n# Note that my i goes downwards, from n to 1. Because position i = 1\n# can hold any number, so I don't even have to check whether the last\n# remaining number fits there. Also, position i = 2 happily holds\n# every second number and i = 3 happily holds every third number,\n# so filling the lowest positions last has a relatively high chance of success.\nclass Solution:\n def countArrangement(self, n: int) -> int:\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i-1, nums-{num})\n for num in nums\n if num % i == 0 or i % num == 0)\n return count(n, set(range(1, n+1)))\n",
"from __future__ import annotations\nfrom functools import cache\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n cache = {}\n\n def helper(perm):\n digits = len(perm)\n if digits == 1:\n return 1\n if perm in cache:\n return cache[perm]\n cnt = 0\n for i in range(digits):\n if perm[i] % digits == 0 or digits % perm[i] == 0:\n cnt += helper(perm[:i] + perm[i + 1:])\n cache[perm] = cnt\n return cnt\n return helper(tuple(range(1, n + 1)))\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n bitset_total = 2 ** n\n dp = [[(0) for _ in range(bitset_total)] for _ in range(n + 1)]\n dp[0][0] = 1\n for i in range(1, n + 1):\n for bm in range(bitset_total):\n for num in range(n):\n if bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n dp[i][bm] += dp[i - 1][bm ^ 1 << num]\n return dp[-1][-1]\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n cache = {}\n\n def helper(perm):\n digits = len(perm)\n if digits == 1:\n return 1\n if perm in cache:\n return cache[perm]\n cnt = 0\n for i in range(digits):\n if perm[i] % digits == 0 or digits % perm[i] == 0:\n cnt += helper(perm[:i] + perm[i + 1:])\n cache[perm] = cnt\n return cnt\n return helper(tuple(range(1, n + 1)))\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n bitset_total = 2 ** n\n dp = [[(0) for _ in range(bitset_total)] for _ in range(n + 1)]\n dp[0][0] = 1\n for i in range(1, n + 1):\n for bm in range(bitset_total):\n for num in range(n):\n if bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n dp[i][bm] += dp[i - 1][bm ^ 1 << num]\n return dp[-1][-1]\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n\n\nclass Solution:\n <function token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n bitset_total = 2 ** n\n dp = [[(0) for _ in range(bitset_total)] for _ in range(n + 1)]\n dp[0][0] = 1\n for i in range(1, n + 1):\n for bm in range(bitset_total):\n for num in range(n):\n if bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n dp[i][bm] += dp[i - 1][bm ^ 1 << num]\n return dp[-1][-1]\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n bitset_total = 2 ** n\n dp = [[(0) for _ in range(bitset_total)] for _ in range(n + 1)]\n dp[0][0] = 1\n for i in range(1, n + 1):\n for bm in range(bitset_total):\n for num in range(n):\n if bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n dp[i][bm] += dp[i - 1][bm ^ 1 << num]\n return dp[-1][-1]\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n\n\nclass Solution:\n <function token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n @cache\n def dfs(bm, i):\n if i == 0:\n return 1\n cnt = 0\n for num in range(n):\n if not bm & 1 << num and ((num + 1) % i == 0 or i % (num + \n 1) == 0):\n cnt += dfs(bm ^ 1 << num, i - 1)\n return cnt\n return dfs(0, n)\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Solution:\n <function token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Solution:\n\n def countArrangement(self, n: int) ->int:\n\n def count(i, nums):\n if i == 1:\n return 1\n return sum(count(i - 1, nums - {num}) for num in nums if num %\n i == 0 or i % num == 0)\n return count(n, set(range(1, n + 1)))\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Solution:\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
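
The comment blocks in the record above describe several formulations of the "beautiful arrangement" count (LeetCode 526): a top-down search memoised on the remaining tuple, a bottom-up bitmask DP, and a memoised DFS over bitmasks that fills positions from n down to 1. The standalone sketch below is illustrative rather than part of the record: it restates the memoised DFS and cross-checks it against a brute-force permutation count for small n; the names count_arrangement and brute_force are chosen here only for clarity.

from functools import lru_cache
from itertools import permutations


def brute_force(n):
    # Count permutations p of 1..n where the value at position i (1-indexed)
    # is divisible by i or divides i.
    return sum(
        all(p[i - 1] % i == 0 or i % p[i - 1] == 0 for i in range(1, n + 1))
        for p in permutations(range(1, n + 1))
    )


def count_arrangement(n):
    @lru_cache(maxsize=None)
    def dfs(bm, i):
        # bm marks the values already used; i is the position being filled.
        if i == 0:
            return 1
        return sum(
            dfs(bm | (1 << num), i - 1)
            for num in range(n)
            if not bm & (1 << num)
            and ((num + 1) % i == 0 or i % (num + 1) == 0)
        )
    return dfs(0, n)


if __name__ == "__main__":
    for n in range(1, 7):
        assert count_arrangement(n) == brute_force(n)
    print([count_arrangement(n) for n in range(1, 7)])  # [1, 2, 3, 8, 10, 36]
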
509 |
fd5fca0e9abbb669ddff4d676147acc4344cdd1c
|
from django import forms
class RemoveProdutoDoCarrinhoForm(forms.Form):
class Meta:
        fields = ('produto_id',)
produto_id = forms.CharField(widget=forms.HiddenInput())
class QuantidadeForm(forms.Form):
class Meta:
fields = ('quantidade', 'produto_id')
# <input type="hidden" name="produto_id" id="id_produto_id" value="xxx">
produto_id = forms.CharField(widget=forms.HiddenInput())
quantidade = forms.IntegerField(
min_value=1,
max_value=1000,
error_messages={'required': 'Campo obrigatório.', },
widget=forms.TextInput(attrs={'class': 'form-control form-control-sm quantidade',
'maxlength': '20',
'onkeypress': 'return event.charCode >= 48 && event.charCode <= 57'}),
required=True)
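
# Illustrative usage sketch; not part of the original file. Exercising these
# forms outside a Django project requires settings.configure(), which is done
# below only when no settings are loaded yet.
if __name__ == "__main__":
    import django
    from django.conf import settings

    if not settings.configured:
        settings.configure(USE_I18N=False)
        django.setup()

    form = QuantidadeForm(data={'produto_id': '42', 'quantidade': '3'})
    print(form.is_valid())    # True
    print(form.cleaned_data)  # {'produto_id': '42', 'quantidade': 3}

    form = QuantidadeForm(data={'produto_id': '42', 'quantidade': ''})
    print(form.is_valid())    # False: 'quantidade' reports 'Campo obrigatório.'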
|
[
"from django import forms\n\nclass RemoveProdutoDoCarrinhoForm(forms.Form):\n class Meta:\n fields = ('produto_id')\n\n produto_id = forms.CharField(widget=forms.HiddenInput())\n\nclass QuantidadeForm(forms.Form):\n class Meta:\n fields = ('quantidade', 'produto_id')\n\n # <input type=\"hidden\" name=\"produto_id\" id=\"id_produto_id\" value=\"xxx\">\n produto_id = forms.CharField(widget=forms.HiddenInput())\n\n quantidade = forms.IntegerField(\n min_value=1,\n max_value=1000,\n error_messages={'required': 'Campo obrigatório.', },\n widget=forms.TextInput(attrs={'class': 'form-control form-control-sm quantidade',\n 'maxlength': '20',\n 'onkeypress': 'return event.charCode >= 48 && event.charCode <= 57'}),\n required=True)\n",
"from django import forms\n\n\nclass RemoveProdutoDoCarrinhoForm(forms.Form):\n\n\n class Meta:\n fields = 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n\n\nclass QuantidadeForm(forms.Form):\n\n\n class Meta:\n fields = 'quantidade', 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n quantidade = forms.IntegerField(min_value=1, max_value=1000,\n error_messages={'required': 'Campo obrigatório.'}, widget=forms.\n TextInput(attrs={'class': 'form-control form-control-sm quantidade',\n 'maxlength': '20', 'onkeypress':\n 'return event.charCode >= 48 && event.charCode <= 57'}), required=True)\n",
"<import token>\n\n\nclass RemoveProdutoDoCarrinhoForm(forms.Form):\n\n\n class Meta:\n fields = 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n\n\nclass QuantidadeForm(forms.Form):\n\n\n class Meta:\n fields = 'quantidade', 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n quantidade = forms.IntegerField(min_value=1, max_value=1000,\n error_messages={'required': 'Campo obrigatório.'}, widget=forms.\n TextInput(attrs={'class': 'form-control form-control-sm quantidade',\n 'maxlength': '20', 'onkeypress':\n 'return event.charCode >= 48 && event.charCode <= 57'}), required=True)\n",
"<import token>\n\n\nclass RemoveProdutoDoCarrinhoForm(forms.Form):\n\n\n class Meta:\n fields = 'produto_id'\n <assignment token>\n\n\nclass QuantidadeForm(forms.Form):\n\n\n class Meta:\n fields = 'quantidade', 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n quantidade = forms.IntegerField(min_value=1, max_value=1000,\n error_messages={'required': 'Campo obrigatório.'}, widget=forms.\n TextInput(attrs={'class': 'form-control form-control-sm quantidade',\n 'maxlength': '20', 'onkeypress':\n 'return event.charCode >= 48 && event.charCode <= 57'}), required=True)\n",
"<import token>\n<class token>\n\n\nclass QuantidadeForm(forms.Form):\n\n\n class Meta:\n fields = 'quantidade', 'produto_id'\n produto_id = forms.CharField(widget=forms.HiddenInput())\n quantidade = forms.IntegerField(min_value=1, max_value=1000,\n error_messages={'required': 'Campo obrigatório.'}, widget=forms.\n TextInput(attrs={'class': 'form-control form-control-sm quantidade',\n 'maxlength': '20', 'onkeypress':\n 'return event.charCode >= 48 && event.charCode <= 57'}), required=True)\n",
"<import token>\n<class token>\n\n\nclass QuantidadeForm(forms.Form):\n\n\n class Meta:\n fields = 'quantidade', 'produto_id'\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
510 |
d654aea3da3e36ccde8a5f4e03798a0dea5aad8a
|
import pandas as pd
import pyranges as pr
import numpy as np
import sys
import logging
from methplotlib.utils import file_sniffer
import pysam
class Methylation(object):
def __init__(self, table, data_type, name, called_sites):
self.table = table
self.data_type = data_type
self.name = name
self.called_sites = called_sites
def read_meth(filename, name, window, smoothen=5):
"""
converts a file from nanopolish to a pandas dataframe
input can be from calculate_methylation_frequency
which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'
smoothening the result by a rolling average
input can also be raw data per read, optionally phased
which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'
"""
file_type = file_sniffer(filename)
logging.info("File is of type {}".format(file_type))
try:
if file_type.startswith("nanopolish"):
return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)
elif file_type == "nanocompore":
return parse_nanocompore(filename, name, window)
elif file_type == "ont-cram":
return parse_ont_cram(filename, name, window)
except Exception:
sys.stderr.write("\n\n\nInput file {} not recognized!\n".format(filename))
sys.stderr.write("\n\n\nDetailed error:\n")
raise
def parse_nanopolish(filename, file_type, name, window, smoothen=5):
table = pd.read_csv(filename, sep="\t")
gr = pr.PyRanges(table.rename(columns={"start": "Start", "chromosome": "Chromosome",
"end": "End", "Strand": "strand"}))
logging.info("Read the file in a dataframe.")
if window:
gr = gr[window.chromosome, window.begin:window.end]
try:
gr.pos = np.floor(gr.drop().df[["Start", "End"]].mean(axis=1))
except KeyError:
sys.stderr.write("\n\n\nProblem parsing nanopolish file {}!\n".format(filename))
sys.stderr.write("Could it be that there are no calls in your selected window?\n")
sys.stderr.write("\n\n\nDetailed error:\n")
raise
table = gr.df
if file_type in ['nanopolish_call', 'nanopolish_phased']:
table = table.drop(columns=['Start', 'End', 'log_lik_methylated',
'log_lik_unmethylated', 'num_calling_strands',
'num_motifs', 'sequence'])
return Methylation(
table=table.sort_values(['read_name', 'pos']),
data_type=file_type,
name=name,
called_sites=len(table))
if file_type == "nanopolish_freq":
called_sites = table.called_sites
table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',
'called_sites', 'called_sites_methylated',
'group_sequence'])
return Methylation(
table=table.sort_values('pos')
.groupby('pos')
.mean()
.rolling(window=smoothen, center=True)
.mean(),
data_type=file_type,
name=name,
called_sites=called_sites.sum())
def parse_nanocompore(filename, name, window):
def nanocompore_columns_of_interest(column):
if column in ['pos', 'ref_id']:
return True
elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):
return True
else:
return False
table = pd.read_csv(filename, sep="\t", usecols=nanocompore_columns_of_interest)
if window:
table = table[table["ref_id"] == window.chromosome]
return Methylation(
table=table.sort_values('pos')
.append({'pos': window.end}, ignore_index=True)
.drop(columns="ref_id")
.fillna(1.0),
data_type='nanocompore',
name=name,
called_sites=len(table))
def parse_ont_cram(filename, name, window):
cram = pysam.AlignmentFile(filename, "rc")
data = []
for read in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):
if not read.is_supplementary and not read.is_secondary:
mod, positions, quals = get_modified_reference_positions(read)
for pos, qual in zip(positions, quals):
if pos is not None:
data.append((read.query_name,
'-' if read.is_reverse else '+',
pos,
qual,
mod))
return Methylation(
table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])
.astype(dtype={'mod': 'category', 'quality': 'float'})
.sort_values(['read_name', 'pos']),
data_type="ont-cram",
name=name,
called_sites=len(data))
def get_modified_reference_positions(read):
if read.has_tag('MM'):
basemod = read.get_tag('MM').split(',', 1)[0]
if '-' in basemod:
sys.exit("ERROR: modifications on negative strand currently unsupported.")
base, mod = basemod.split('+')
deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]
probabilities = phred_to_probability(read.get_tag('MP'))
locations = np.cumsum(deltas) + np.concatenate(
(np.zeros(shape=1),
np.ones(shape=len(deltas) - 1))).astype('int')
base_index = np.array(
[i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]
)
modified_bases = base_index[locations]
refpos = np.array(read.get_reference_positions(full_length=True))
if read.is_reverse:
refpos = np.flipud(refpos)
probabilities = probabilities[::-1]
return (basemod, refpos[modified_bases], probabilities)
else:
return (None, [None], [None])
def errs_tab(n):
"""Generate list of error rates for qualities less than equal than n."""
return [10**(q / -10) for q in range(n + 1)]
def phred_to_probability(quals, tab=errs_tab(128)):
return [tab[ord(q) - 33] for q in quals]
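
# Worked example (illustrative, not in the original module): a Phred quality
# of 20 is encoded as chr(20 + 33) == '5', and corresponds to an error rate of
# 10 ** (-20 / 10) == 0.01, so
#   errs_tab(128)[20]          -> 0.01
#   phred_to_probability('5')  -> [0.01]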
def get_data(methylation_files, names, window, smoothen=5):
"""
Import methylation data from all files in the list methylation_files
Data can be either frequency or raw.
    Data is extracted within the window given by args.window.
Frequencies are smoothened using a sliding window
"""
return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]
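
# Illustrative call of get_data(); not part of the original module. The file
# names are placeholders and the window object merely mimics the attributes
# this module expects (chromosome, begin, end), which methplotlib normally
# builds from its command-line arguments.
#
# from types import SimpleNamespace
# window = SimpleNamespace(chromosome="chr20", begin=58815000, end=58895000)
# meth = get_data(["sampleA_freq.tsv", "sampleB_calls.tsv"],
#                 names=["sampleA", "sampleB"], window=window, smoothen=5)
# for m in meth:
#     print(m.name, m.data_type, m.called_sites)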
|
[
"import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info(\"File is of type {}\".format(file_type))\n try:\n if file_type.startswith(\"nanopolish\"):\n return parse_nanopolish(filename, file_type, name, window, smoothen=smoothen)\n elif file_type == \"nanocompore\":\n return parse_nanocompore(filename, name, window)\n elif file_type == \"ont-cram\":\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write(\"\\n\\n\\nInput file {} not recognized!\\n\".format(filename))\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep=\"\\t\")\n gr = pr.PyRanges(table.rename(columns={\"start\": \"Start\", \"chromosome\": \"Chromosome\",\n \"end\": \"End\", \"Strand\": \"strand\"}))\n logging.info(\"Read the file in a dataframe.\")\n\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[[\"Start\", \"End\"]].mean(axis=1))\n except KeyError:\n sys.stderr.write(\"\\n\\n\\nProblem parsing nanopolish file {}!\\n\".format(filename))\n sys.stderr.write(\"Could it be that there are no calls in your selected window?\\n\")\n sys.stderr.write(\"\\n\\n\\nDetailed error:\\n\")\n raise\n\n table = gr.df\n\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands',\n 'num_motifs', 'sequence'])\n return Methylation(\n table=table.sort_values(['read_name', 'pos']),\n data_type=file_type,\n name=name,\n called_sites=len(table))\n if file_type == \"nanopolish_freq\":\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated',\n 'group_sequence'])\n return Methylation(\n table=table.sort_values('pos')\n .groupby('pos')\n .mean()\n .rolling(window=smoothen, center=True)\n .mean(),\n data_type=file_type,\n name=name,\n called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep=\"\\t\", usecols=nanocompore_columns_of_interest)\n if window:\n table = table[table[\"ref_id\"] == window.chromosome]\n return Methylation(\n table=table.sort_values('pos')\n .append({'pos': window.end}, ignore_index=True)\n .drop(columns=\"ref_id\")\n .fillna(1.0),\n data_type='nanocompore',\n name=name,\n called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = 
pysam.AlignmentFile(filename, \"rc\")\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin, end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name,\n '-' if read.is_reverse else '+',\n pos,\n qual,\n mod))\n return Methylation(\n table=pd.DataFrame(data, columns=['read_name', 'strand', 'pos', 'quality', 'mod'])\n .astype(dtype={'mod': 'category', 'quality': 'float'})\n .sort_values(['read_name', 'pos']),\n data_type=\"ont-cram\",\n name=name,\n called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\"ERROR: modifications on negative strand currently unsupported.\")\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate(\n (np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array(\n [i for i, letter in enumerate(read.get_forward_sequence()) if letter == base]\n )\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return (basemod, refpos[modified_bases], probabilities)\n else:\n return (None, [None], [None])\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [10**(q / -10) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(methylation_files, names)]\n",
"import pandas as pd\nimport pyranges as pr\nimport numpy as np\nimport sys\nimport logging\nfrom methplotlib.utils import file_sniffer\nimport pysam\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in 
cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = 
get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\ndef phred_to_probability(quals, tab=errs_tab(128)):\n return [tab[ord(q) - 33] for q in quals]\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = 
get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<function token>\n\n\ndef get_data(methylation_files, names, window, smoothen=5):\n \"\"\"\n Import methylation data from all files in the list methylation_files\n\n Data can be either frequency or raw.\n\n data is extracted within the window args.window\n Frequencies are smoothened using a sliding window\n \"\"\"\n return [read_meth(f, n, window, smoothen) for f, n in zip(\n methylation_files, names)]\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = 
get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\ndef get_modified_reference_positions(read):\n if read.has_tag('MM'):\n basemod = read.get_tag('MM').split(',', 1)[0]\n if '-' in basemod:\n sys.exit(\n 'ERROR: modifications on negative strand currently unsupported.'\n )\n base, mod = basemod.split('+')\n deltas = [int(i) for i in read.get_tag('MM').split(',')[1:]]\n probabilities = phred_to_probability(read.get_tag('MP'))\n locations = np.cumsum(deltas) + np.concatenate((np.zeros(shape=1),\n np.ones(shape=len(deltas) - 1))).astype('int')\n base_index = np.array([i for i, letter in enumerate(read.\n get_forward_sequence()) if letter == base])\n modified_bases = base_index[locations]\n refpos = np.array(read.get_reference_positions(full_length=True))\n if read.is_reverse:\n refpos = np.flipud(refpos)\n probabilities = probabilities[::-1]\n return basemod, refpos[modified_bases], probabilities\n else:\n return None, [None], [None]\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\ndef parse_nanocompore(filename, name, window):\n\n def nanocompore_columns_of_interest(column):\n if column in ['pos', 'ref_id']:\n return True\n elif column.endswith('pvalue_context_2') or column.endswith('pvalue'):\n return True\n else:\n return False\n table = pd.read_csv(filename, sep='\\t', usecols=\n nanocompore_columns_of_interest)\n if window:\n table = table[table['ref_id'] == window.chromosome]\n return Methylation(table=table.sort_values('pos').append({'pos': window\n .end}, ignore_index=True).drop(columns='ref_id').fillna(1.0),\n data_type='nanocompore', name=name, called_sites=len(table))\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = 
get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\n<function token>\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<function token>\n\n\ndef parse_ont_cram(filename, name, window):\n cram = pysam.AlignmentFile(filename, 'rc')\n data = []\n for read in cram.fetch(reference=window.chromosome, start=window.begin,\n end=window.end):\n if not read.is_supplementary and not read.is_secondary:\n mod, positions, quals = get_modified_reference_positions(read)\n for pos, qual in zip(positions, quals):\n if pos is not None:\n data.append((read.query_name, '-' if read.is_reverse else\n '+', pos, qual, mod))\n return Methylation(table=pd.DataFrame(data, columns=['read_name',\n 'strand', 'pos', 'quality', 'mod']).astype(dtype={'mod': 'category',\n 'quality': 'float'}).sort_values(['read_name', 'pos']), data_type=\n 'ont-cram', name=name, called_sites=len(data))\n\n\n<function token>\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / 
-10)) for q in range(n + 1)]\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\ndef read_meth(filename, name, window, smoothen=5):\n \"\"\"\n converts a file from nanopolish to a pandas dataframe\n input can be from calculate_methylation_frequency\n which will return a dataframe with 'chromosome', 'pos', 'methylated_frequency'\n smoothening the result by a rolling average\n\n input can also be raw data per read, optionally phased\n which will return a dataframe with 'read', 'chromosome', 'pos', 'log_lik_ratio', 'strand'\n \"\"\"\n file_type = file_sniffer(filename)\n logging.info('File is of type {}'.format(file_type))\n try:\n if file_type.startswith('nanopolish'):\n return parse_nanopolish(filename, file_type, name, window,\n smoothen=smoothen)\n elif file_type == 'nanocompore':\n return parse_nanocompore(filename, name, window)\n elif file_type == 'ont-cram':\n return parse_ont_cram(filename, name, window)\n except Exception:\n sys.stderr.write('\\n\\n\\nInput file {} not recognized!\\n'.format(\n filename))\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<function token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef errs_tab(n):\n \"\"\"Generate list of error rates for qualities less than equal than n.\"\"\"\n return [(10 ** (q / -10)) for q in range(n + 1)]\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<function token>\n\n\ndef parse_nanopolish(filename, file_type, name, window, smoothen=5):\n table = pd.read_csv(filename, sep='\\t')\n gr = pr.PyRanges(table.rename(columns={'start': 'Start', 'chromosome':\n 'Chromosome', 'end': 'End', 'Strand': 'strand'}))\n logging.info('Read the file in a dataframe.')\n if window:\n gr = gr[window.chromosome, window.begin:window.end]\n try:\n gr.pos = np.floor(gr.drop().df[['Start', 'End']].mean(axis=1))\n except KeyError:\n sys.stderr.write('\\n\\n\\nProblem parsing nanopolish file {}!\\n'.\n format(filename))\n sys.stderr.write(\n 'Could it be that there are no calls in your selected window?\\n')\n sys.stderr.write('\\n\\n\\nDetailed error:\\n')\n raise\n table = gr.df\n if file_type in ['nanopolish_call', 'nanopolish_phased']:\n table = table.drop(columns=['Start', 'End', 'log_lik_methylated',\n 'log_lik_unmethylated', 'num_calling_strands', 'num_motifs',\n 'sequence'])\n return Methylation(table=table.sort_values(['read_name', 'pos']),\n data_type=file_type, name=name, called_sites=len(table))\n if file_type == 'nanopolish_freq':\n called_sites = table.called_sites\n table = table.drop(columns=['Start', 'End', 'num_motifs_in_group',\n 'called_sites', 'called_sites_methylated', 'group_sequence'])\n return Methylation(table=table.sort_values('pos').groupby('pos').\n mean().rolling(window=smoothen, center=True).mean(), data_type=\n file_type, name=name, called_sites=called_sites.sum())\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n\n def __init__(self, table, data_type, name, called_sites):\n self.table = table\n self.data_type = data_type\n self.name = name\n self.called_sites = called_sites\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Methylation(object):\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
511 |
6ee36994f63d64e35c4e76f65e9c4f09797a161e
|
class Node:
def __init__(self, info):
self.info = info
self.left = None
self.right = None
self.level = None
def __str__(self):
return str(self.info)
class BinarySearchTree:
def __init__(self):
self.root = None
def create(self, val):
if self.root == None:
self.root = Node(val)
else:
current = self.root
while True:
if val < current.info:
if current.left:
current = current.left
else:
current.left = Node(val)
break
elif val > current.info:
if current.right:
current = current.right
else:
current.right = Node(val)
break
else:
break
# Enter your code here. Read input from STDIN. Print output to STDOUT
'''
class Node:
def __init__(self,info):
self.info = info
self.left = None
self.right = None
// this is a node of the tree , which contains info as data, left , right
Approach:
1. Find a path from the root to n1 and store it in a vector or array.
2. Find a path from the root to n2 and store it in another vector or array.
3. Traverse both paths while the values in the arrays are the same. Return the common element just before the first mismatch.
'''
# Time complexity: O(n)
def lca(root, v1, v2):
def find_path(r, p, n):
if not r: return False
# Append the root
p.append(r)
if r.info == n:
return True
# Check if k is found in left or right sub-tree
if (r.left and find_path(r.left, p, n)) or (r.right and find_path(r.right, p, n)):
return True
# If not present in subtree rooted with root, remove
# root from path and return False
p.pop()
return False
p1 = []
p2 = []
if not find_path(root, p1, v1) or not find_path(root, p2, v2):
return -1
i = 0
while i < len(p1) and i < len(p2):
if p1[i].info != p2[i].info:
break
i += 1
return p1[i-1]
tree = BinarySearchTree()
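# Illustrative usage sketch (an assumption, not part of the original STDIN-driven harness):
# build a small BST with the classes above and query the lowest common ancestor of two values.
def _demo_lca():
    demo_tree = BinarySearchTree()
    for value in [4, 2, 3, 1, 7, 6]:
        demo_tree.create(value)
    ancestor = lca(demo_tree.root, 1, 7)
    print(ancestor)  # the paths [4, 2, 1] and [4, 7] diverge right after the root, so this prints 4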
|
[
"class Node:\n def __init__(self, info): \n self.info = info \n self.left = None \n self.right = None \n self.level = None \n\n def __str__(self):\n return str(self.info) \n\nclass BinarySearchTree:\n def __init__(self): \n self.root = None\n\n def create(self, val): \n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n \n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n'''\nclass Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n \n\n // this is a node of the tree , which contains info as data, left , right\n \nApproach:\n1. Find a path from the root to n1 and store it in a vector or array. \n2. Find a path from the root to n2 and store it in another vector or array. \n3. Traverse both paths till the values in arrays are the same. Return the common element just before the mismatch. \n'''\n# Time complexity: O(n)\n\n\ndef lca(root, v1, v2):\n def find_path(r, p, n):\n if not r: return False\n # Append the root\n p.append(r)\n \n if r.info == n:\n return True\n \n # Check if k is found in left or right sub-tree\n if (r.left and find_path(r.left, p, n)) or (r.right and find_path(r.right, p, n)):\n return True\n \n # If not present in subtree rooted with root, remove\n # root from path and return False\n \n p.pop()\n return False\n\n p1 = []\n p2 = []\n \n if not find_path(root, p1, v1) or not find_path(root, p2, v2):\n return -1\n\n i = 0\n while i < len(p1) and i < len(p2):\n if p1[i].info != p2[i].info:\n break\n i += 1\n \n return p1[i-1]\n\ntree = BinarySearchTree()\n",
"class Node:\n\n def __init__(self, info):\n self.info = info\n self.left = None\n self.right = None\n self.level = None\n\n def __str__(self):\n return str(self.info)\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n\n\ndef lca(root, v1, v2):\n\n def find_path(r, p, n):\n if not r:\n return False\n p.append(r)\n if r.info == n:\n return True\n if r.left and find_path(r.left, p, n) or r.right and find_path(r.\n right, p, n):\n return True\n p.pop()\n return False\n p1 = []\n p2 = []\n if not find_path(root, p1, v1) or not find_path(root, p2, v2):\n return -1\n i = 0\n while i < len(p1) and i < len(p2):\n if p1[i].info != p2[i].info:\n break\n i += 1\n return p1[i - 1]\n\n\ntree = BinarySearchTree()\n",
"class Node:\n\n def __init__(self, info):\n self.info = info\n self.left = None\n self.right = None\n self.level = None\n\n def __str__(self):\n return str(self.info)\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n\n\ndef lca(root, v1, v2):\n\n def find_path(r, p, n):\n if not r:\n return False\n p.append(r)\n if r.info == n:\n return True\n if r.left and find_path(r.left, p, n) or r.right and find_path(r.\n right, p, n):\n return True\n p.pop()\n return False\n p1 = []\n p2 = []\n if not find_path(root, p1, v1) or not find_path(root, p2, v2):\n return -1\n i = 0\n while i < len(p1) and i < len(p2):\n if p1[i].info != p2[i].info:\n break\n i += 1\n return p1[i - 1]\n\n\n<assignment token>\n",
"class Node:\n\n def __init__(self, info):\n self.info = info\n self.left = None\n self.right = None\n self.level = None\n\n def __str__(self):\n return str(self.info)\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"class Node:\n <function token>\n\n def __str__(self):\n return str(self.info)\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"class Node:\n <function token>\n <function token>\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"<class token>\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n\n def create(self, val):\n if self.root == None:\n self.root = Node(val)\n else:\n current = self.root\n while True:\n if val < current.info:\n if current.left:\n current = current.left\n else:\n current.left = Node(val)\n break\n elif val > current.info:\n if current.right:\n current = current.right\n else:\n current.right = Node(val)\n break\n else:\n break\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"<class token>\n\n\nclass BinarySearchTree:\n\n def __init__(self):\n self.root = None\n <function token>\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"<class token>\n\n\nclass BinarySearchTree:\n <function token>\n <function token>\n\n\n<docstring token>\n<function token>\n<assignment token>\n",
"<class token>\n<class token>\n<docstring token>\n<function token>\n<assignment token>\n"
] | false |
512 |
207b6e56b683c0b069c531a4c6076c2822814390
|
file = open("yo.txt", "wr")
file.write("Yo")
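# A hedged alternative sketch: the context-manager form closes the file automatically,
# even if the write fails partway through. "yo.txt" is just the same example filename.
with open("yo.txt", "w") as f:
    f.write("Yo")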
|
[
"file = open(\"yo.txt\", \"wr\")\n\nfile.write(\"Yo\")\n\n",
"file = open('yo.txt', 'wr')\nfile.write('Yo')\n",
"<assignment token>\nfile.write('Yo')\n",
"<assignment token>\n<code token>\n"
] | false |
513 |
e6af221f1d6397d0fc52671cdd27d43549d0aecb
|
__author__ = 'jjpr'
import pyrr
import barleycorn as bc
def test_xyz123():
cone_x = bc.primitives.Cone(1.0, 1.0)
|
[
"__author__ = 'jjpr'\n\nimport pyrr\nimport barleycorn as bc\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"__author__ = 'jjpr'\nimport pyrr\nimport barleycorn as bc\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"__author__ = 'jjpr'\n<import token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"<assignment token>\n<import token>\n\n\ndef test_xyz123():\n cone_x = bc.primitives.Cone(1.0, 1.0)\n",
"<assignment token>\n<import token>\n<function token>\n"
] | false |
514 |
ce75c23c6b0862dde797225f53c900b4ebc56428
|
from bbdd import *
def usuario():
    # the username must be 5-15 alphanumeric characters; re-prompt until it is valid
    global usser
    usser = input("Introduce un usuario : ")
    if len(usser) < 5 or len(usser) > 15:
        print("El usuario debe tener entre 5 y 15 caracteres")
        usuario()
    elif not usser.isalnum():
        print("Los valores del usuario deben ser únicamente letras o números")
        usuario()
    else:
        print(True)
def contraseña():
    # the password must be at least 10 characters long, contain a non-alphanumeric
    # character, mix upper and lower case, and have no spaces; re-prompt until valid
    global passw
    passw = input("Introduce contraseña: ")
    if len(passw) <= 9:
        print("La contraseña debe tener al menos 10 caracteres")
        contraseña()
    elif passw.isalnum():
        print("La contraseña debe tener al menos un carácter no alfanumérico")
        contraseña()
    elif passw.lower() == passw:
        print("Debe haber por lo menos una mayúscula")
        contraseña()
    elif passw.upper() == passw:
        print("Debe haber por lo menos una minúscula")
        contraseña()
    elif " " in passw:
        print("La contraseña no debe tener espacios en blanco")
        contraseña()
    else:
        print(True)
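# Illustrative, non-interactive sketch of the same rules (an assumption, not part of the
# original flow): validate a candidate password without input() or recursion.
def cumple_reglas(candidata):
    return (len(candidata) >= 10
            and not candidata.isalnum()
            and candidata.lower() != candidata
            and candidata.upper() != candidata
            and " " not in candidata)
# e.g. cumple_reglas("Abcdefghi!") is True, cumple_reglas("abcdefghi!") is False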
|
[
"from bbdd import *\n\n\ndef usuario():\n global usser\n usser=input(\"Introduce un usuario : \")\n if len(usser)<5 or len(usser)>15:\n print(\"El usuario debe tener entre 5 y 15 caracteres\")\n usuario()\n elif usser.isalnum()==False:\n print(\"Los valores del usurio deben ser únicamente letras o números\")\n usuario()\n else:\n print(True)\n\n\n\ndef contraseña():\n global passw\n passw=input(\"Introduce contraseña: \")\n if len(passw)<=9:\n print(\"La contraseña debe tener al menos 10 caractéres\")\n contraseña()\n elif passw.isalnum()==True:\n print (\"La contraseña debe tener al menos un carácter no alfanumérico\")\n contraseña()\n elif passw.lower() == passw:\n print(\"Debe haber por lo menos una mayúscula\")\n contraseña()\n elif passw.upper()==passw:\n print(\"Debe haber por lo menos una minúscula\")\n contraseña()\n\n for i in passw:\n if i==\" \":\n print(\"La contraseña no debe tener espacios en blanco\")\n contraseña()\n print(True)\n",
"from bbdd import *\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"<import token>\n\n\ndef usuario():\n global usser\n usser = input('Introduce un usuario : ')\n if len(usser) < 5 or len(usser) > 15:\n print('El usuario debe tener entre 5 y 15 caracteres')\n usuario()\n elif usser.isalnum() == False:\n print('Los valores del usurio deben ser únicamente letras o números')\n usuario()\n else:\n print(True)\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"<import token>\n<function token>\n\n\ndef contraseña():\n global passw\n passw = input('Introduce contraseña: ')\n if len(passw) <= 9:\n print('La contraseña debe tener al menos 10 caractéres')\n contraseña()\n elif passw.isalnum() == True:\n print('La contraseña debe tener al menos un carácter no alfanumérico')\n contraseña()\n elif passw.lower() == passw:\n print('Debe haber por lo menos una mayúscula')\n contraseña()\n elif passw.upper() == passw:\n print('Debe haber por lo menos una minúscula')\n contraseña()\n for i in passw:\n if i == ' ':\n print('La contraseña no debe tener espacios en blanco')\n contraseña()\n print(True)\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
515 |
345967e2aeafda6ce30cbbbbacf976c97b17def7
|
myDict={'Friends': ['AP', 'Soham', 'Baba'],
'Likes': ['Math', 'Programming'],
'languages': ['C++', 'Python', 'Java']}
myInt=123
myFloat=12.3333
myName='Somesh Thakur'
|
[
"myDict={'Friends': ['AP', 'Soham', 'Baba'],\r\n 'Likes': ['Math', 'Programming'],\r\n 'languages': ['C++', 'Python', 'Java']}\r\nmyInt=123\r\nmyFloat=12.3333\r\nmyName='Somesh Thakur'",
"myDict = {'Friends': ['AP', 'Soham', 'Baba'], 'Likes': ['Math',\n 'Programming'], 'languages': ['C++', 'Python', 'Java']}\nmyInt = 123\nmyFloat = 12.3333\nmyName = 'Somesh Thakur'\n",
"<assignment token>\n"
] | false |
516 |
f70f66926b9e2bf8b387d481263493d7f4c65397
|
""""
articulo
cliente
venta
ventadet
"""
class Articulo:
def __init__(self,cod,des,pre,stoc):
self.codigo=cod
self.descripcion = des
self.precio=pre
self.stock=stoc
class ventaDetalle:
def __init__(self,pro,pre,cant):
self.producto=pro
self.precio=pre
self.cantidad=cant
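# Illustrative usage sketch (the sample values are assumptions): a ventaDetalle line
# references an Articulo together with the unit price and quantity sold.
articulo_demo = Articulo("A001", "Lapicero", 2.5, 100)
detalle_demo = ventaDetalle(articulo_demo, articulo_demo.precio, 3)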
|
[
"\"\"\"\"\r\narticulo\r\ncliente\r\nventa\r\nventadet\r\n\"\"\"\r\nclass Articulo:\r\n def __init__(self,cod,des,pre,stoc):\r\n self.codigo=cod\r\n self.descripcion = des\r\n self.precio=pre\r\n self.stock=stoc\r\n\r\n\r\n\r\n\r\n\r\nclass ventaDetalle:\r\n def __init__(self,pro,pre,cant):\r\n self.producto=pro\r\n self.precio=pre\r\n self.cantidad=cant\r\n",
"<docstring token>\n\n\nclass Articulo:\n\n def __init__(self, cod, des, pre, stoc):\n self.codigo = cod\n self.descripcion = des\n self.precio = pre\n self.stock = stoc\n\n\nclass ventaDetalle:\n\n def __init__(self, pro, pre, cant):\n self.producto = pro\n self.precio = pre\n self.cantidad = cant\n",
"<docstring token>\n\n\nclass Articulo:\n <function token>\n\n\nclass ventaDetalle:\n\n def __init__(self, pro, pre, cant):\n self.producto = pro\n self.precio = pre\n self.cantidad = cant\n",
"<docstring token>\n<class token>\n\n\nclass ventaDetalle:\n\n def __init__(self, pro, pre, cant):\n self.producto = pro\n self.precio = pre\n self.cantidad = cant\n",
"<docstring token>\n<class token>\n\n\nclass ventaDetalle:\n <function token>\n",
"<docstring token>\n<class token>\n<class token>\n"
] | false |
517 |
b8fcd8e6dce8d210576bc4166dd258e5fd51278d
|
"""
This module contains the logic to resolve the head-tail orientation of a predicted video time series.
"""
import logging
import numpy as np
import numpy.ma as ma
from wormpose.pose.distance_metrics import angle_distance, skeleton_distance
from wormpose.pose.results_datatypes import (
BaseResults,
ShuffledResults,
OriginalResults,
)
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# threshold on the angle distance between neighboring frames' theta below which they are considered continuous and belonging to the same segment
CONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)
# frames are considered part of the same segment if they are at most this many seconds apart
# (and satisfy the distance threshold)
CONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2
# discard segments shorter than this many seconds
MIN_SEGMENT_SIZE_SEC = 0.2
# don't align isolated segments that are more than this amount of seconds apart from aligned segments
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1
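# Hedged worked example of how the second-based constants above are used: later code
# converts them to frame counts with max(1, int(frame_rate * seconds)), so at an assumed
# frame_rate of 30 fps the 0.2 s window and minimum segment size both become 6 frames.
def _seconds_to_frames(frame_rate: float, seconds: float) -> int:
    """Illustrative helper mirroring the conversion done in _make_continuous_partitions."""
    return max(1, int(frame_rate * seconds))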
def _init_partitioned_series(shuffled_series: np.ndarray):
return ma.masked_all_like(shuffled_series)
def _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):
partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]
partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]
class _PartitionedResults(BaseResults):
def __init__(self, shuffled_results: ShuffledResults):
self.cur_partition = -1
self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)
self._shuffled_results = shuffled_results
theta = _init_partitioned_series(shuffled_results.theta)
skeletons = _init_partitioned_series(shuffled_results.skeletons)
scores = _init_partitioned_series(shuffled_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
self.partitions.mask[indices] = True
def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):
if new_partition:
self.cur_partition += 1
_set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)
_set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)
_set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)
self.partitions[frame_index] = self.cur_partition
def _get_partition_indices(self, partition_index: int):
return np.where(self.partitions == partition_index)[0]
def get_segments(self):
all_partitions_indexes = np.unique(self.partitions.filled(-1))
return [
self._get_partition_indices(partition_index)
for partition_index in all_partitions_indexes
if partition_index >= 0
]
class _ResolvedResults(BaseResults):
def __init__(self, partitioned_results: _PartitionedResults):
self._partitioned_results = partitioned_results
theta = _init_unified_series(partitioned_results.theta)
skeletons = _init_unified_series(partitioned_results.skeletons)
scores = _init_unified_series(partitioned_results.scores)
super().__init__(theta=theta, skeletons=skeletons, scores=scores)
def resolve(self, segment, segment_alignment):
self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]
self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]
def mask(self, indices):
self.theta.mask[indices] = True
self.skeletons.mask[indices] = True
self.scores.mask[indices] = True
def num_valid(self):
return np.sum(~self.scores.mask)
class _FinalResults(BaseResults):
@classmethod
def from_resolved(cls, resolved_results: _ResolvedResults):
return _FinalResults(
theta=resolved_results.theta.filled(np.nan),
skeletons=resolved_results.skeletons.filled(np.nan),
scores=resolved_results.scores.filled(np.nan),
)
@classmethod
def from_shuffled(cls, shuffled_results: ShuffledResults):
return _FinalResults(
theta=np.full_like(shuffled_results.theta[:, 0], np.nan),
            # build each NaN placeholder from its own source array so the shapes stay consistent
            skeletons=np.full_like(shuffled_results.skeletons[:, 0], np.nan),
            scores=np.full_like(shuffled_results.scores[:, 0], np.nan),
)
def _make_continuous_partitions(
shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float
) -> _PartitionedResults:
time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))
min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))
partitioned_results = _PartitionedResults(shuffled_results)
# discard low score frames early (use the maximum value of both scores for now)
good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]
for frame_index in good_score_frames:
prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]
# if there is a big gap > time_window we start a new partition, with a random value (0)
if np.all(np.any(prev_theta.mask, axis=1)):
partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)
        # otherwise we look at the closest non-nan frame within the recent time_window to see
        # if we can continue the partition, as long as the values stay continuous
else:
last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
dists = [
angle_distance(
shuffled_results.theta[frame_index, k, :],
prev_theta[last_valid_index],
)
for k in range(2)
]
partition = int(np.argmin(dists))
if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:
partitioned_results.set_partition(frame_index=frame_index, partition=partition)
# discard short segments
for cur_partition_indices in partitioned_results.get_segments():
if len(cur_partition_indices) < min_segment_size:
partitioned_results.mask(cur_partition_indices)
return partitioned_results
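# Minimal standalone sketch of the continuity idea used above (an illustration, not the
# wormpose implementation): each frame has two candidate angle vectors (original and
# flipped); chain the choice that stays closest to the previous frame, using a plain
# Euclidean distance as a stand-in for angle_distance.
def _demo_chain_orientation(candidates: np.ndarray) -> np.ndarray:
    """candidates has shape (n_frames, 2, n_angles); returns the chosen candidate index per frame."""
    choices = np.zeros(len(candidates), dtype=int)
    for i in range(1, len(candidates)):
        prev = candidates[i - 1, choices[i - 1]]
        dists = [np.linalg.norm(candidates[i, k] - prev) for k in range(2)]
        choices[i] = int(np.argmin(dists))
    return choices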
def _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):
"""
Match the head/tail alignment with the results of the classical tracking in each of the segments,
if there is enough labelled data in the segment
"""
segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)
for segment_index, segment in enumerate(segments):
segment_skeletons = labelled_skeletons[segment]
non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))
labels_count = np.sum(non_nan_labelled)
non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))
to_compare = np.logical_and(non_nan_labelled, non_masked)
similarity_scores = []
for label_skel, partitioned_skeleton in zip(
segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]
):
dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]
similarity_scores.append(dists)
if len(similarity_scores) > 0:
mean_similarity_scores = np.mean(similarity_scores, axis=0)
if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:
segments_alignment[segment_index] = np.argmax(mean_similarity_scores)
return segments_alignment
def _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):
# evaluate how far away this segment is from known values
score = np.nan
segment_offset = np.nan
if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:
gap = segments[segment_index][0] - segments[segment_index - 1][-1]
score = gap
segment_offset = -1
if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:
gap = segments[segment_index + 1][0] - segments[segment_index][-1]
if np.isnan(score) or gap < score:
score = gap
segment_offset = 1
return score, segment_offset
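# Illustrative usage sketch of the gap logic above (the segment ranges and the single
# aligned segment are assumptions): with segments at frames 0-4, 8-11 and 30-34 and only
# the first one aligned, the middle segment is 4 frames away from a trusted neighbour
# (offset -1), while the last one has no aligned neighbour and gets (nan, nan).
def _demo_gap_to_adjacent():
    demo_segments = [np.arange(0, 5), np.arange(8, 12), np.arange(30, 35)]
    demo_alignment = ma.masked_all((3,), dtype=np.uint8)
    demo_alignment[0] = 0
    return [_calculate_smallest_gap_to_adjacent(i, demo_segments, demo_alignment)
            for i in (1, 2)]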
def _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):
"""
Resolve the unaligned segments by comparing with adjacent segments,
    starting with the segments that have the smallest frame gap to an adjacent trusted segment.
    Don't align isolated segments that have a big gap to the nearest trusted segment
"""
maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))
    # if no segments were aligned at all there is nothing trusted to propagate from, so stop here
if np.all(segments_alignment.mask):
logger.info("There are no trusted segments with head decision to resolve the whole video, stopping analysis.")
return segments_alignment
    # fix first the segments that have known adjacent frames with a small gap,
    # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)
unaligned = np.where(segments_alignment.mask)[0]
while len(unaligned) > 0:
# we first pick the best candidate segment to align (there are known frames nearby before or after or both)
all_gaps = [
_calculate_smallest_gap_to_adjacent(
segment_index=x,
segments=segments,
segments_alignment=segments_alignment,
)
for x in unaligned
]
segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]
gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]
# abort if only isolated segments are left
if gap_to_adjacent_segment > maximum_gap_allowed:
break
cur_segment_index = unaligned[segment_to_fix_index]
cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]
adjacent_segment_index = cur_segment_index + adjacent_segment_offset
adjacent_alignment = segments_alignment[adjacent_segment_index]
adjacent_segment = segments[adjacent_segment_index]
adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment]
if adjacent_segment_offset == -1:
closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment
closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment
elif adjacent_segment_offset == 1:
closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment
closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment
else:
raise ValueError()
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]
segments_alignment[cur_segment_index] = int(np.argmax(dists))
unaligned = np.where(segments_alignment.mask)[0]
return segments_alignment
def _init_unified_series(mixed_series):
return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)
def resolve_head_tail(
shuffled_results: ShuffledResults,
original_results: OriginalResults,
frame_rate: float,
score_threshold,
) -> BaseResults:
len_series = len(shuffled_results)
# Create continuous segments without jumps
partitioned_results = _make_continuous_partitions(
score_threshold=score_threshold,
frame_rate=frame_rate,
shuffled_results=shuffled_results,
)
segments = partitioned_results.get_segments()
if len(segments) == 0:
logger.error(
f"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},"
f" stopping analysis."
)
return _FinalResults.from_shuffled(shuffled_results)
# Choose each segment global alignment by comparing with labelled data
segments_alignment = _align_segments_with_labels(
segments, partitioned_results.skeletons, original_results.skeletons
)
# Fix unaligned segments here by comparing skeletons with neighboring segments iteratively
segments_alignment = _align_unlabelled_segments_with_adjacents(
segments, segments_alignment, partitioned_results.skeletons, frame_rate
)
# Compile results
resolved_results = _ResolvedResults(partitioned_results)
for segment, segment_alignment in zip(segments, segments_alignment):
if not ma.is_masked(segment_alignment):
resolved_results.resolve(segment, segment_alignment)
# Filter the final results again by score threshold
low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]
resolved_results.mask(low_scores_indices)
num_success = resolved_results.num_valid()
original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()
logger.info(
f"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully "
f"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}"
f" or {(float(original_num_success) / len_series * 100):.1f}% of total)"
)
if num_success < original_num_success:
logger.warning(f"Original results had {original_num_success - num_success} more successfully analyzed frames!")
return _FinalResults.from_resolved(resolved_results)
|
[
"\"\"\"\nThis module contains the logic to resolve the head-tail orientation of a predicted video time series.\n\"\"\"\n\nimport logging\n\nimport numpy as np\nimport numpy.ma as ma\n\nfrom wormpose.pose.distance_metrics import angle_distance, skeleton_distance\nfrom wormpose.pose.results_datatypes import (\n BaseResults,\n ShuffledResults,\n OriginalResults,\n)\n\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\n# threshold to compare neighbor frames theta, to be considered continuous and belong to the same segment\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\n\n# we consider frames to be part of the same segment if they are maximum this amount of seconds apart\n# (and satisfy the distance threshold)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\n\n# discard too small segments less than this amount of seconds\nMIN_SEGMENT_SIZE_SEC = 0.2\n\n# don't align isolated segments that are more than this amount of seconds apart from aligned segments\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int, partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 - partition]\n\n\nclass _PartitionedResults(BaseResults):\n def __init__(self, shuffled_results: ShuffledResults):\n\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition: bool = False):\n if new_partition:\n self.cur_partition += 1\n\n _set_partition(self.theta, self._shuffled_results.theta, frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons, frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores, frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [\n self._get_partition_indices(partition_index)\n for partition_index in all_partitions_indexes\n if partition_index >= 0\n ]\n\n\nclass _ResolvedResults(BaseResults):\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:, segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][:, segment_alignment]\n 
self.theta[segment] = self._partitioned_results.theta[segment][:, segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(\n theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan),\n scores=resolved_results.scores.filled(np.nan),\n )\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(\n theta=np.full_like(shuffled_results.theta[:, 0], np.nan),\n skeletons=np.full_like(shuffled_results.scores[:, 0], np.nan),\n scores=np.full_like(shuffled_results.skeletons[:, 0], np.nan),\n )\n\n\ndef _make_continuous_partitions(\n shuffled_results: ShuffledResults, score_threshold: float, frame_rate: float\n) -> _PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n\n partitioned_results = _PartitionedResults(shuffled_results)\n\n # discard low score frames early (use the maximum value of both scores for now)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.scores, axis=1), score_threshold))[0]\n\n for frame_index in good_score_frames:\n\n prev_theta = partitioned_results.theta[frame_index - min(time_window, frame_index) : frame_index, 0]\n\n # if there is a big gap > time_window we start a new partition, with a random value (0)\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index, partition=0, new_partition=True)\n # otherwise we look in the time_window close past the closest non nan frame see if we can continue the\n # partition as long as the values stay continuous\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]\n dists = [\n angle_distance(\n shuffled_results.theta[frame_index, k, :],\n prev_theta[last_valid_index],\n )\n for k in range(2)\n ]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index, partition=partition)\n\n # discard short segments\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons, labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(\n segment_skeletons[to_compare], partitioned_skeletons[segment][to_compare]\n ):\n dists = [skeleton_distance(label_skel, x) for x in partitioned_skeleton]\n similarity_scores.append(dists)\n\n if 
len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(mean_similarity_scores)\n\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments, segments_alignment):\n # evaluate how far away this segment is from known values\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment, partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate * MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n # ensure that if no segments have been aligned at all, pick one solution randomly to start\n if np.all(segments_alignment.mask):\n logger.info(\"There are no trusted segments with head decision to resolve the whole video, stopping analysis.\")\n return segments_alignment\n\n # fix in priority the segments with known adjacent frames with little gap\n # until all segments are aligned except the isolated ones (further than maximum_gap_allowed)\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n # we first pick the best candidate segment to align (there are known frames nearby before or after or both)\n all_gaps = [\n _calculate_smallest_gap_to_adjacent(\n segment_index=x,\n segments=segments,\n segments_alignment=segments_alignment,\n )\n for x in unaligned\n ]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[segment_to_fix_index]\n\n # abort if only isolated segments are left\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[cur_segment_index]]\n\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][:, adjacent_alignment]\n\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0] # first frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[-1] # last frame of prev segment\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1] # last frame of cur segment\n closest_known_skeleton = adjacent_segment_skeleton[0] # first frame of next segment\n else:\n raise ValueError()\n\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n\n unaligned = np.where(segments_alignment.mask)[0]\n\n 
return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:], dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(\n shuffled_results: ShuffledResults,\n original_results: OriginalResults,\n frame_rate: float,\n score_threshold,\n) -> BaseResults:\n len_series = len(shuffled_results)\n\n # Create continuous segments without jumps\n partitioned_results = _make_continuous_partitions(\n score_threshold=score_threshold,\n frame_rate=frame_rate,\n shuffled_results=shuffled_results,\n )\n segments = partitioned_results.get_segments()\n\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold},\"\n f\" stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n\n # Choose each segment global alignment by comparing with labelled data\n segments_alignment = _align_segments_with_labels(\n segments, partitioned_results.skeletons, original_results.skeletons\n )\n\n # Fix unaligned segments here by comparing skeletons with neighboring segments iteratively\n segments_alignment = _align_unlabelled_segments_with_adjacents(\n segments, segments_alignment, partitioned_results.skeletons, frame_rate\n )\n\n # Compile results\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n\n # Filter the final results again by score threshold\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores, score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons), axis=(1, 2)).sum()\n logger.info(\n f\"Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully \"\n f\"({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success}\"\n f\" or {(float(original_num_success) / len_series * 100):.1f}% of total)\"\n )\n if num_success < original_num_success:\n logger.warning(f\"Original results had {original_num_success - num_success} more successfully analyzed frames!\")\n\n return _FinalResults.from_resolved(resolved_results)\n",
"<docstring token>\nimport logging\nimport numpy as np\nimport numpy.ma as ma\nfrom wormpose.pose.distance_metrics import angle_distance, skeleton_distance\nfrom wormpose.pose.results_datatypes import BaseResults, ShuffledResults, OriginalResults\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\nMIN_SEGMENT_SIZE_SEC = 0.2\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n 
resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = _PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef 
_align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = 
np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"<docstring token>\n<import token>\nlogging.basicConfig()\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nCONTINUOUS_ANGLES_DIST_THRESHOLD = np.deg2rad(30)\nCONTINOUS_SEGMENT_TIME_WINDOW_SEC = 0.2\nMIN_SEGMENT_SIZE_SEC = 0.2\nMAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC = 1\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), 
skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = _PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that 
have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : 
{original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
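The listing above builds continuous segments frame by frame in _make_continuous_partitions: for every frame whose best score clears the threshold, both candidate orientations are compared against the last valid orientation inside a short time window, and the closer one is kept only if its distance stays under a threshold. A minimal, self-contained sketch of that per-frame decision follows; angle_distance here is a stand-in (mean absolute angular difference) since the real helper is imported elsewhere, and the threshold value is purely illustrative.

    import numpy as np
    import numpy.ma as ma

    CONTINUOUS_ANGLES_DIST_THRESHOLD = 0.5  # illustrative value only


    def angle_distance(theta_a, theta_b):
        # Stand-in metric: mean absolute difference of the joint angles.
        return float(np.mean(np.abs(theta_a - theta_b)))


    def pick_partition(candidate_thetas, prev_theta):
        """candidate_thetas: (2, n_angles) array, both orientations of one frame.
        prev_theta: masked (window, n_angles) history of the reference partition.
        Returns 0 or 1 for the chosen orientation, or None when undecided."""
        if prev_theta.shape[0] == 0 or np.all(np.any(prev_theta.mask, axis=1)):
            return None  # no valid recent frame: the caller starts a new segment
        last_valid = np.where(~np.any(prev_theta.mask, axis=1))[0][-1]
        dists = [angle_distance(candidate_thetas[k], prev_theta[last_valid])
                 for k in range(2)]
        partition = int(np.argmin(dists))
        return partition if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD else None


    # Usage: two valid history frames; orientation 0 is the continuous one.
    history = ma.masked_all((3, 4))
    history[1] = [0.1, 0.2, 0.3, 0.4]
    history[2] = [0.1, 0.2, 0.3, 0.4]
    frame = np.array([[0.12, 0.22, 0.32, 0.42],   # close to the history
                      [2.00, 2.00, 2.00, 2.00]])  # flipped candidate, far away
    print(pick_partition(frame, history))  # -> 0

Segments shorter than the minimum size are then masked out, exactly as the final loop over get_segments() in the listing does.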
"<docstring token>\n<import token>\nlogging.basicConfig()\n<assignment token>\nlogger.setLevel(logging.DEBUG)\n<assignment token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: 
ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = _PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, 
int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had 
{original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
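_align_unlabelled_segments_with_adjacents, shown in full above, fixes unaligned segments in order of their frame gap to the nearest already-aligned neighbour and stops once the smallest remaining gap exceeds the maximum allowed. A toy illustration of that ordering, with made-up segment indices, behaves like this:

    import numpy as np
    import numpy.ma as ma

    segments = [np.arange(0, 50), np.arange(60, 90), np.arange(200, 240)]
    segments_alignment = ma.masked_all((3,), dtype=np.uint8)
    segments_alignment[0] = 1  # only the first segment is trusted so far


    def smallest_gap_to_adjacent(i):
        score, offset = np.nan, np.nan
        if i - 1 >= 0 and not segments_alignment.mask[i - 1]:
            score, offset = segments[i][0] - segments[i - 1][-1], -1
        if i + 1 < len(segments_alignment) and not segments_alignment.mask[i + 1]:
            gap = segments[i + 1][0] - segments[i][-1]
            if np.isnan(score) or gap < score:
                score, offset = gap, 1
        return score, offset


    unaligned = np.where(segments_alignment.mask)[0]        # -> [1, 2]
    gaps = [smallest_gap_to_adjacent(i) for i in unaligned]  # [(11, -1), (nan, nan)]
    print(np.nanargmin(gaps, axis=0)[0])  # -> 0: segment 1 is resolved first

Segment 2 has no trusted neighbour yet, so its gap stays NaN; once segment 1 is aligned, the next loop iteration measures segment 2 against it (a gap of 111 frames) and either resolves it or gives up if that gap exceeds the maximum allowed.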
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef _init_partitioned_series(shuffled_series: np.ndarray):\n return ma.masked_all_like(shuffled_series)\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n 
score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = _PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n 
MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - 
num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\ndef _make_continuous_partitions(shuffled_results: ShuffledResults,\n score_threshold: float, frame_rate: float) ->_PartitionedResults:\n time_window = max(1, int(frame_rate * 
CONTINOUS_SEGMENT_TIME_WINDOW_SEC))\n min_segment_size = max(1, int(frame_rate * MIN_SEGMENT_SIZE_SEC))\n partitioned_results = _PartitionedResults(shuffled_results)\n good_score_frames = np.where(ma.greater_equal(ma.max(shuffled_results.\n scores, axis=1), score_threshold))[0]\n for frame_index in good_score_frames:\n prev_theta = partitioned_results.theta[frame_index - min(\n time_window, frame_index):frame_index, 0]\n if np.all(np.any(prev_theta.mask, axis=1)):\n partitioned_results.set_partition(frame_index=frame_index,\n partition=0, new_partition=True)\n else:\n last_valid_index = np.where(~np.any(prev_theta.mask, axis=1))[0][-1\n ]\n dists = [angle_distance(shuffled_results.theta[frame_index, k,\n :], prev_theta[last_valid_index]) for k in range(2)]\n partition = int(np.argmin(dists))\n if dists[partition] < CONTINUOUS_ANGLES_DIST_THRESHOLD:\n partitioned_results.set_partition(frame_index=frame_index,\n partition=partition)\n for cur_partition_indices in partitioned_results.get_segments():\n if len(cur_partition_indices) < min_segment_size:\n partitioned_results.mask(cur_partition_indices)\n return partitioned_results\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no 
trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
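resolve_head_tail, repeated above, ends by masking every frame whose resolved score still falls below the threshold before the final results are filled with NaN. That masking step is a one-liner on a masked array; a small example with made-up scores and an illustrative threshold:

    import numpy as np
    import numpy.ma as ma

    score_threshold = 0.7  # illustrative value
    resolved_scores = ma.array([0.9, 0.4, 0.8, 0.2])

    low_scores_indices = np.where(ma.masked_less(resolved_scores, score_threshold).mask)[0]
    print(low_scores_indices)  # -> [1 3]: these frames are masked in the final results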
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the 
results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n 
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\ndef _init_unified_series(mixed_series):\n return ma.masked_all((mixed_series.shape[0],) + mixed_series.shape[2:],\n dtype=mixed_series.dtype)\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the 
results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n 
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\n<function token>\n\n\ndef resolve_head_tail(shuffled_results: ShuffledResults, original_results:\n OriginalResults, frame_rate: float, score_threshold) ->BaseResults:\n len_series = len(shuffled_results)\n partitioned_results = _make_continuous_partitions(score_threshold=\n score_threshold, frame_rate=frame_rate, shuffled_results=\n shuffled_results)\n segments = partitioned_results.get_segments()\n if len(segments) == 0:\n logger.error(\n f\"Couldn't find any continuous segments of predicted data above the threshold {score_threshold}, stopping analysis.\"\n )\n return _FinalResults.from_shuffled(shuffled_results)\n segments_alignment = _align_segments_with_labels(segments,\n partitioned_results.skeletons, original_results.skeletons)\n segments_alignment = _align_unlabelled_segments_with_adjacents(segments,\n segments_alignment, partitioned_results.skeletons, frame_rate)\n resolved_results = _ResolvedResults(partitioned_results)\n for segment, segment_alignment in zip(segments, segments_alignment):\n if not ma.is_masked(segment_alignment):\n resolved_results.resolve(segment, segment_alignment)\n low_scores_indices = np.where(ma.masked_less(resolved_results.scores,\n score_threshold).mask)[0]\n resolved_results.mask(low_scores_indices)\n num_success = resolved_results.num_valid()\n original_num_success = np.any(~np.isnan(original_results.skeletons),\n axis=(1, 2)).sum()\n logger.info(\n f'Resolved head/tail, {num_success} out of {len_series} frames analyzed successfully ({float(num_success) / len_series * 100:.1f}%) (original features : {original_num_success} or {float(original_num_success) / len_series * 100:.1f}% of total)'\n )\n if num_success < original_num_success:\n logger.warning(\n f'Original results had {original_num_success - num_success} more successfully analyzed frames!'\n )\n return _FinalResults.from_resolved(resolved_results)\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the 
results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\ndef _align_unlabelled_segments_with_adjacents(segments, segments_alignment,\n partitioned_skeletons, frame_rate: float):\n \"\"\"\n Resolve the unaligned segments by comparing with adjacent segments,\n starting with the segments that have the least frames gap between an adjacent trusted segment\n Don't align isolated segments which a big gap between trusted segments\n \"\"\"\n maximum_gap_allowed = max(1, int(frame_rate *\n MAXIMUM_GAP_ALLOWED_WITH_ADJACENT_SEGMENT_SEC))\n if np.all(segments_alignment.mask):\n logger.info(\n 'There are no trusted segments with head decision to resolve the whole video, stopping analysis.'\n )\n return segments_alignment\n unaligned = np.where(segments_alignment.mask)[0]\n while len(unaligned) > 0:\n all_gaps = [_calculate_smallest_gap_to_adjacent(segment_index=x,\n segments=segments, segments_alignment=segments_alignment) for x in\n unaligned]\n segment_to_fix_index = np.nanargmin(all_gaps, axis=0)[0]\n gap_to_adjacent_segment, adjacent_segment_offset = all_gaps[\n segment_to_fix_index]\n if gap_to_adjacent_segment > maximum_gap_allowed:\n break\n cur_segment_index = unaligned[segment_to_fix_index]\n cur_segment_skeleton = partitioned_skeletons[segments[\n cur_segment_index]]\n adjacent_segment_index = cur_segment_index + adjacent_segment_offset\n adjacent_alignment = segments_alignment[adjacent_segment_index]\n adjacent_segment = segments[adjacent_segment_index]\n adjacent_segment_skeleton = partitioned_skeletons[adjacent_segment][\n :, adjacent_alignment]\n if adjacent_segment_offset == -1:\n closest_unaligned_skeleton = cur_segment_skeleton[0]\n closest_known_skeleton = adjacent_segment_skeleton[-1]\n elif adjacent_segment_offset == 1:\n closest_unaligned_skeleton = cur_segment_skeleton[-1]\n closest_known_skeleton = adjacent_segment_skeleton[0]\n else:\n raise ValueError()\n 
dists = [skeleton_distance(closest_known_skeleton, skel) for skel in\n closest_unaligned_skeleton]\n segments_alignment[cur_segment_index] = int(np.argmax(dists))\n unaligned = np.where(segments_alignment.mask)[0]\n return segments_alignment\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n\n\ndef _align_segments_with_labels(segments, partitioned_skeletons,\n labelled_skeletons, min_labelled=5):\n \"\"\"\n Match the head/tail alignment with the 
results of the classical tracking in each of the segments,\n if there is enough labelled data in the segment\n \"\"\"\n segments_alignment = ma.masked_all((len(segments),), dtype=np.uint8)\n for segment_index, segment in enumerate(segments):\n segment_skeletons = labelled_skeletons[segment]\n non_nan_labelled = np.any(~np.isnan(segment_skeletons), axis=(1, 2))\n labels_count = np.sum(non_nan_labelled)\n non_masked = ~np.any(partitioned_skeletons[segment].mask, axis=(1, \n 2, 3))\n to_compare = np.logical_and(non_nan_labelled, non_masked)\n similarity_scores = []\n for label_skel, partitioned_skeleton in zip(segment_skeletons[\n to_compare], partitioned_skeletons[segment][to_compare]):\n dists = [skeleton_distance(label_skel, x) for x in\n partitioned_skeleton]\n similarity_scores.append(dists)\n if len(similarity_scores) > 0:\n mean_similarity_scores = np.mean(similarity_scores, axis=0)\n if mean_similarity_scores[0] * mean_similarity_scores[1\n ] < 0 and labels_count > min_labelled:\n segments_alignment[segment_index] = np.argmax(\n mean_similarity_scores)\n return segments_alignment\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef _set_partition(partitioned_series, shuffled_series, frame_index: int,\n partition: int):\n partitioned_series[frame_index][0] = shuffled_series[frame_index, partition\n ]\n partitioned_series[frame_index][1] = shuffled_series[frame_index, 1 -\n partition]\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if 
segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n\n\ndef _calculate_smallest_gap_to_adjacent(segment_index, segments,\n segments_alignment):\n score = np.nan\n segment_offset = np.nan\n if segment_index - 1 >= 0 and not segments_alignment.mask[segment_index - 1\n ]:\n gap = segments[segment_index][0] - segments[segment_index - 1][-1]\n score = gap\n segment_offset = -1\n if segment_index + 1 < len(segments_alignment\n ) and not 
segments_alignment.mask[segment_index + 1]:\n gap = segments[segment_index + 1][0] - segments[segment_index][-1]\n if np.isnan(score) or gap < score:\n score = gap\n segment_offset = 1\n return score, segment_offset\n\n\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n self.partitions.mask[indices] = True\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n <function token>\n\n def set_partition(self, frame_index: int, partition: int, new_partition:\n bool=False):\n if new_partition:\n self.cur_partition += 1\n _set_partition(self.theta, self._shuffled_results.theta,\n frame_index, partition)\n _set_partition(self.skeletons, self._shuffled_results.skeletons,\n frame_index, partition)\n _set_partition(self.scores, self._shuffled_results.scores,\n frame_index, partition)\n self.partitions[frame_index] = self.cur_partition\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n <function token>\n <function token>\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n\n def get_segments(self):\n all_partitions_indexes = np.unique(self.partitions.filled(-1))\n return [self._get_partition_indices(partition_index) for\n partition_index in all_partitions_indexes if partition_index >= 0]\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n\n def __init__(self, shuffled_results: ShuffledResults):\n self.cur_partition = -1\n self.partitions = ma.masked_all((len(shuffled_results),), dtype=int)\n self._shuffled_results = shuffled_results\n theta = _init_partitioned_series(shuffled_results.theta)\n skeletons = _init_partitioned_series(shuffled_results.skeletons)\n scores = _init_partitioned_series(shuffled_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n <function token>\n <function token>\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n <function token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n <function token>\n <function token>\n <function token>\n\n def _get_partition_indices(self, partition_index: int):\n return np.where(self.partitions == partition_index)[0]\n <function token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass _PartitionedResults(BaseResults):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n\n def mask(self, indices):\n self.theta.mask[indices] = True\n self.skeletons.mask[indices] = True\n self.scores.mask[indices] = True\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass _ResolvedResults(BaseResults):\n\n def __init__(self, partitioned_results: _PartitionedResults):\n self._partitioned_results = partitioned_results\n theta = _init_unified_series(partitioned_results.theta)\n skeletons = _init_unified_series(partitioned_results.skeletons)\n scores = _init_unified_series(partitioned_results.scores)\n super().__init__(theta=theta, skeletons=skeletons, scores=scores)\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n <function token>\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass _ResolvedResults(BaseResults):\n <function token>\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n <function token>\n\n def num_valid(self):\n return np.sum(~self.scores.mask)\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass _ResolvedResults(BaseResults):\n <function token>\n\n def resolve(self, segment, segment_alignment):\n self.scores[segment] = self._partitioned_results.scores[segment][:,\n segment_alignment]\n self.skeletons[segment] = self._partitioned_results.skeletons[segment][\n :, segment_alignment]\n self.theta[segment] = self._partitioned_results.theta[segment][:,\n segment_alignment]\n <function token>\n <function token>\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n\n\nclass _ResolvedResults(BaseResults):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n\n @classmethod\n def from_shuffled(cls, shuffled_results: ShuffledResults):\n return _FinalResults(theta=np.full_like(shuffled_results.theta[:, 0\n ], np.nan), skeletons=np.full_like(shuffled_results.scores[:, 0\n ], np.nan), scores=np.full_like(shuffled_results.skeletons[:, 0\n ], np.nan))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass _FinalResults(BaseResults):\n\n @classmethod\n def from_resolved(cls, resolved_results: _ResolvedResults):\n return _FinalResults(theta=resolved_results.theta.filled(np.nan),\n skeletons=resolved_results.skeletons.filled(np.nan), scores=\n resolved_results.scores.filled(np.nan))\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n\n\nclass _FinalResults(BaseResults):\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
518 |
ec90c731a0e546d9d399cbb68c92be1acca8cbe0
|
from package import *
class mysql(MakePackage):
dependencies = ["cmake"]
fetch="http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/"
config='cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
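
The config template above uses Python %-style named interpolation for the install prefix. A minimal sketch of how the surrounding build framework presumably expands it (the prefix path here is only a placeholder, not taken from the record):

    config = 'cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'
    print(config % {'prefix': '/usr/local'})
    # cmake -G "Unix Makefiles" -DCMAKE_INSTALL_PREFIX=/usr/local -DWITH_READLINE=1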
|
[
"\nfrom package import *\n\nclass mysql(MakePackage):\n dependencies = [\"cmake\"]\n fetch=\"http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/\"\n config='cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n\n",
"from package import *\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"<import token>\n\n\nclass mysql(MakePackage):\n dependencies = ['cmake']\n fetch = (\n 'http://dev.mysql.com/get/Downloads/MySQL-5.6/mysql-5.6.10.tar.gz/from/http://cdn.mysql.com/'\n )\n config = (\n 'cmake -G \"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX=%(prefix)s -DWITH_READLINE=1'\n )\n",
"<import token>\n\n\nclass mysql(MakePackage):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
519 |
372d8c8cb9ec8f579db8588aff7799c73c5af255
|
#!/home/nick/.virtualenvs/twitterbots/bin/python3.5
# -*- coding: utf-8 -*-
import tweepy
import sqlite3
from configparser import ConfigParser
'''
A little OOP would be good later for
authenticated user data, c, conn, api
'''
def main():
Collector.collect()
class Collector:
# Main function
def collect():
api = Collector.get_api()
tweet_dump = Collector.all_tweet_db()
c = tweet_dump[0]
conn = tweet_dump[1]
last_list = Collector.last_tweets(c, conn)
# Look for new friends, add to db
new_friends = Collector.new_f_check(api, c)
Collector.download_to_limit(api, c, conn, new_friends)
# Checks timelines of everyone in db already
# adds anything new to db
Collector.download_recent(api, c, conn, last_list)
def get_api():
parser = ConfigParser()
parser.read('twitter_auth.ini')
consumer_key = parser.get('Keys',
'consumer_key').strip("'")
consumer_secret = parser.get('Secrets',
'consumer_secret').strip("'")
access_token = parser.get('Tokens',
'access_token').strip("'")
access_token_secret = parser.get('Secrets',
'access_token_secret').strip("'")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True)
return api
    # connects to tweet_dump.db, creates tdump if it does not exist
    # tdump stores all tweets from anyone in the list
def all_tweet_db():
conn = sqlite3.connect('tweet_dump_main.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS tdump
(tweet TEXT,
username TEXT,
tweet_date TEXT,
tweet_id TEXT,
tweet_source TEXT,
user_id TEXT)''')
return c, conn
    # connects to tweet_dump.db, creates mentioned if it does not exist
    # mentioned stores tweets that mention the authenticated user
def mention_tweet_db():
conn = sqlite3.connect('tweet_dump_main.db')
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS mentioned
(tweet TEXT,
username TEXT,
tweet_date TEXT,
tweet_id TEXT,
tweet_source TEXT,
user_id TEXT)''')
return c, conn
# looks for new friends by comparing authenticated
# user's friend list with list of friends in tdump
def new_f_check(api, c):
# get list of user's ids
c.execute('SELECT user_id FROM tdump')
users = c.fetchall()
users = list(set([user[0] for user in users]))
# get list of friends_ids from twitter
friends_ids = api.friends_ids()
new_friends = [x for x in friends_ids if str(x) not in users]
return new_friends
    # downloads up to 3200 of a user's most
    # recent tweets and commits them to tdump
def download_to_limit(api, c, conn, friend_list):
# List of tweet ids already in db
c.execute('SELECT tweet_id FROM tdump')
tweet_ids = c.fetchall()
tweet_ids = [e[0] for e in tweet_ids]
new_tweets = []
for friend in friend_list:
try:
# try to get most recent 200 tweets from friend
get_tweets = api.user_timeline(id=friend, count=200)
except Exception as e:
continue
# add to list of all of this friend's tweets
new_tweets.extend(get_tweets)
# find oldest retrieved tweet's id number less 1
oldest = new_tweets[-1].id - 1
# get tweets until 3200 limit hit
while len(get_tweets) > 0:
try:
# max_id arg looks for id's less than arg's value
get_tweets = api.user_timeline(id=friend,
count=200,
max_id=oldest)
except Exception as e:
continue
new_tweets.extend(get_tweets)
oldest = new_tweets[-1].id - 1
if len(new_tweets) != 0:
print('Insert Active')
for tweet in new_tweets:
c.execute('''INSERT INTO tdump
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
conn.commit()
if len(new_tweets) != 0:
print('Insert Done' + '\n')
    # simple check whether the tweet text contains my screen name
    # replace the hard-coded handle later
def mention_me(new_tweet_list, c, conn):
mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]
if len(new_tweet_list) != 0:
print('Insert Active')
for tweet in mentioned:
c.execute('''INSERT INTO served
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
conn.commit()
if len(new_tweet_list) != 0:
print('Insert Done' + '\n')
    # returns list of (user_id, tweet_id) pairs
    # the tweet_id is the id of that user's most recent
    # tweet in the database
def last_tweets(c, conn):
# list of user ids and the date of the
# last tweet in db
user_last_tweets = []
# get list of user's ids
c.execute('SELECT user_id FROM tdump')
users = c.fetchall()
users = list(set([user[0] for user in users]))
for user in users:
c.execute('''SELECT user_id, tweet_id
FROM tdump
WHERE user_id = ?
ORDER BY tweet_date DESC''',
[user])
last_tweet = c.fetchone()
user_last_tweets.append(last_tweet)
return user_last_tweets
    # downloads the most recent posts in each user's timeline
def download_recent(api, c, conn, last_tweets):
c.execute('SELECT tweet_id FROM tdump')
tweet_ids = [x[0] for x in c.fetchall()]
new_tweets = []
for pair in last_tweets:
user_id = pair[0]
tweet_id = pair[1]
try:
get_tweets = api.user_timeline(id=user_id,
since_id=tweet_id,
count=200)
except Exception:
continue
if len(get_tweets) != 0:
# add to list of all of this friend's tweets
new_tweets.extend(get_tweets)
# find newest retrieved tweet's id number plus 1
newest = get_tweets[0].id + 1
while len(get_tweets) > 0:
try:
# max_id arg looks for id's less than arg's value
get_tweets = api.user_timeline(id=user_id,
count=200,
since_id=newest)
new_tweets.extend(get_tweets)
newest = get_tweets[0].id + 1
except Exception:
continue
if len(new_tweets) != 0:
print('Insert Active')
for tweet in new_tweets:
if tweet.user.screen_name != 'BonneNick' \
and tweet.id not in tweet_ids:
c.execute('''INSERT INTO tdump
(tweet,
username,
tweet_date,
tweet_id,
tweet_source,
user_id)
VALUES(?,?,?,?,?,?)''',
[tweet.text,
tweet.user.screen_name,
tweet.created_at,
tweet.id_str,
tweet.source,
tweet.user.id_str])
conn.commit()
conn.close()
if len(new_tweets) != 0:
print('Insert Done' + '\n')
if __name__ == '__main__':
main()
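
get_api() above reads Twitter credentials from 'twitter_auth.ini' with ConfigParser. A minimal sketch of a script that writes a file in the layout the parser.get() calls appear to expect; the section and option names come from the code above, while the values are placeholders (the .strip("'") calls suggest each value is stored wrapped in single quotes):

    from configparser import ConfigParser

    config = ConfigParser()
    config['Keys'] = {'consumer_key': "'YOUR_CONSUMER_KEY'"}
    config['Tokens'] = {'access_token': "'YOUR_ACCESS_TOKEN'"}
    config['Secrets'] = {'consumer_secret': "'YOUR_CONSUMER_SECRET'",
                         'access_token_secret': "'YOUR_ACCESS_TOKEN_SECRET'"}
    with open('twitter_auth.ini', 'w') as ini_file:
        config.write(ini_file)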
|
[
"#!/home/nick/.virtualenvs/twitterbots/bin/python3.5\n# -*- coding: utf-8 -*-\n\nimport tweepy\nimport sqlite3\n\nfrom configparser import ConfigParser\n\n'''\nA little OOP would be good later for\nauthenticated user data, c, conn, api\n'''\n\n\ndef main():\n\n Collector.collect()\n\n\nclass Collector:\n\n # Main function\n def collect():\n\n api = Collector.get_api()\n\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n\n # Look for new friends, add to db\n new_friends = Collector.new_f_check(api, c)\n\n Collector.download_to_limit(api, c, conn, new_friends)\n\n # Checks timelines of everyone in db already\n # adds anything new to db\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys',\n 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets',\n 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens',\n 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets',\n 'access_token_secret').strip(\"'\")\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n return api\n\n # connects to tweet_dump.db creates tdump if not exists\n # tdump stores all tweets from anyone in list\n def all_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # connects to tweet_dump.db creats served if not exists\n # served stores tweets that are mention authenticated user\n def mention_tweet_db():\n\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n\n c.execute('''CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)''')\n\n return c, conn\n\n # looks for new friends by comparing authenticated\n # user's friend list with list of friends in tdump\n def new_f_check(api, c):\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n # get list of friends_ids from twitter\n friends_ids = api.friends_ids()\n\n new_friends = [x for x in friends_ids if str(x) not in users]\n\n return new_friends\n\n # downloads up to 3200 of a user's most\n # recent tweets commits to tdump\n def download_to_limit(api, c, conn, friend_list):\n\n # List of tweet ids already in db\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n\n new_tweets = []\n\n for friend in friend_list:\n\n try:\n # try to get most recent 200 tweets from friend\n get_tweets = api.user_timeline(id=friend, count=200)\n\n except Exception as e:\n\n continue\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find oldest retrieved tweet's id number less 1\n oldest = new_tweets[-1].id - 1\n\n # get tweets until 3200 limit hit\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=friend,\n count=200,\n max_id=oldest)\n\n except Exception as e:\n\n continue\n\n new_tweets.extend(get_tweets)\n\n oldest = new_tweets[-1].id - 1\n\n if len(new_tweets) != 0:\n\n 
print('Insert Active')\n\n for tweet in new_tweets:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n # simply check if tweet text contains my screen name\n # change from hard code later\n def mention_me(new_tweet_list, c, conn):\n\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n\n if len(new_tweet_list) != 0:\n\n print('Insert Active')\n\n for tweet in mentioned:\n\n c.execute('''INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n\n if len(new_tweet_list) != 0:\n\n print('Insert Done' + '\\n')\n\n # returns list of user_id and created_at pairs\n # date associated with user_id is date of last\n # tweet in database\n def last_tweets(c, conn):\n\n # list of user ids and the date of the\n # last tweet in db\n user_last_tweets = []\n\n # get list of user's ids\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n\n for user in users:\n\n c.execute('''SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC''',\n [user])\n\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n\n return user_last_tweets\n\n # downloads most recent posts in each users timelines\n def download_recent(api, c, conn, last_tweets):\n\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n\n new_tweets = []\n\n for pair in last_tweets:\n\n user_id = pair[0]\n tweet_id = pair[1]\n\n try:\n\n get_tweets = api.user_timeline(id=user_id,\n since_id=tweet_id,\n count=200)\n\n except Exception:\n\n continue\n\n if len(get_tweets) != 0:\n\n # add to list of all of this friend's tweets\n new_tweets.extend(get_tweets)\n\n # find newest retrieved tweet's id number plus 1\n newest = get_tweets[0].id + 1\n\n while len(get_tweets) > 0:\n\n try:\n # max_id arg looks for id's less than arg's value\n get_tweets = api.user_timeline(id=user_id,\n count=200,\n since_id=newest)\n\n new_tweets.extend(get_tweets)\n\n newest = get_tweets[0].id + 1\n\n except Exception:\n\n continue\n\n if len(new_tweets) != 0:\n\n print('Insert Active')\n\n for tweet in new_tweets:\n\n if tweet.user.screen_name != 'BonneNick' \\\n and tweet.id not in tweet_ids:\n\n c.execute('''INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)''',\n [tweet.text,\n tweet.user.screen_name,\n tweet.created_at,\n tweet.id_str,\n tweet.source,\n tweet.user.id_str])\n\n conn.commit()\n conn.close()\n\n if len(new_tweets) != 0:\n\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n\n main()\n",
"import tweepy\nimport sqlite3\nfrom configparser import ConfigParser\n<docstring token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n 
c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<docstring token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM 
tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n<docstring token>\n\n\ndef main():\n Collector.collect()\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM 
tdump\n WHERE user_id = ?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n\n def last_tweets(c, conn):\n user_last_tweets = []\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n for user in users:\n c.execute(\n \"\"\"SELECT user_id, tweet_id\n FROM tdump\n WHERE user_id = 
?\n ORDER BY tweet_date DESC\"\"\"\n , [user])\n last_tweet = c.fetchone()\n user_last_tweets.append(last_tweet)\n return user_last_tweets\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n\n def get_api():\n parser = ConfigParser()\n parser.read('twitter_auth.ini')\n consumer_key = parser.get('Keys', 'consumer_key').strip(\"'\")\n consumer_secret = parser.get('Secrets', 'consumer_secret').strip(\"'\")\n access_token = parser.get('Tokens', 'access_token').strip(\"'\")\n access_token_secret = parser.get('Secrets', 'access_token_secret'\n ).strip(\"'\")\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n return api\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = 
api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n\n def download_to_limit(api, c, conn, friend_list):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = c.fetchall()\n tweet_ids = [e[0] for e in tweet_ids]\n new_tweets = []\n for friend in friend_list:\n try:\n get_tweets = api.user_timeline(id=friend, count=200)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=friend, count=200,\n max_id=oldest)\n except Exception as e:\n continue\n new_tweets.extend(get_tweets)\n oldest = new_tweets[-1].id - 1\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not 
in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n\n def all_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS tdump\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n <function token>\n\n def mention_tweet_db():\n conn = sqlite3.connect('tweet_dump_main.db')\n c = conn.cursor()\n c.execute(\n \"\"\"CREATE TABLE IF NOT EXISTS mentioned\n (tweet TEXT,\n username TEXT,\n tweet_date TEXT,\n tweet_id TEXT,\n tweet_source TEXT,\n user_id TEXT)\"\"\"\n )\n return c, conn\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n <function token>\n <function token>\n\n def new_f_check(api, c):\n c.execute('SELECT user_id FROM tdump')\n users = c.fetchall()\n users = list(set([user[0] for user in users]))\n friends_ids = api.friends_ids()\n new_friends = [x for x in friends_ids if str(x) not in users]\n return new_friends\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n\n def download_recent(api, c, conn, last_tweets):\n c.execute('SELECT tweet_id FROM tdump')\n tweet_ids = [x[0] for x in c.fetchall()]\n new_tweets = []\n for pair in last_tweets:\n user_id = pair[0]\n tweet_id = pair[1]\n try:\n get_tweets = api.user_timeline(id=user_id, since_id=\n tweet_id, count=200)\n except Exception:\n continue\n if len(get_tweets) != 0:\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n while len(get_tweets) > 0:\n try:\n get_tweets = api.user_timeline(id=user_id, count=\n 200, since_id=newest)\n new_tweets.extend(get_tweets)\n newest = get_tweets[0].id + 1\n except Exception:\n continue\n if len(new_tweets) != 0:\n print('Insert Active')\n for tweet in new_tweets:\n if (tweet.user.screen_name != 'BonneNick' and tweet.id not in\n tweet_ids):\n c.execute(\n \"\"\"INSERT INTO tdump\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n conn.close()\n if len(new_tweets) != 0:\n print('Insert Done' + '\\n')\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n\n def collect():\n api = Collector.get_api()\n tweet_dump = Collector.all_tweet_db()\n c = tweet_dump[0]\n conn = tweet_dump[1]\n last_list = Collector.last_tweets(c, conn)\n new_friends = Collector.new_f_check(api, c)\n Collector.download_to_limit(api, c, conn, new_friends)\n Collector.download_recent(api, c, conn, last_list)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def mention_me(new_tweet_list, c, conn):\n mentioned = [x for x in new_tweet_list if '@BonneNick' in x[0]]\n if len(new_tweet_list) != 0:\n print('Insert Active')\n for tweet in mentioned:\n c.execute(\n \"\"\"INSERT INTO served\n (tweet,\n username,\n tweet_date,\n tweet_id,\n tweet_source,\n user_id)\n VALUES(?,?,?,?,?,?)\"\"\"\n , [tweet.text, tweet.user.screen_name, tweet.created_at,\n tweet.id_str, tweet.source, tweet.user.id_str])\n conn.commit()\n if len(new_tweet_list) != 0:\n print('Insert Done' + '\\n')\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n\n\nclass Collector:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<docstring token>\n<function token>\n<class token>\n<code token>\n"
] | false |
520 |
8dfef0a4525328be8dfb4723f0a168dc22eb5eb2
|
#!/usr/bin/env python
"""
This is a Cog used to display processes/programs running on the client in a Discord text channel
Commented using reStructuredText (reST)
ToDo
create and use a database for multiple servers
"""
# Futures
# Built-in/Generic Imports
import os
import sys
import configparser
import shutil
import time
import codecs
# Libs
import discord
import psutil
from discord.ext import commands, tasks
# Own modules
__author__ = "Jack Draper"
__copyright__ = "Unofficial Copyright 2019, CyclopsBot"
__credits__ = ["Jack Draper"]
__license__ = "Developer"
__version__ = "0.0.4"
__maintainer__ = "Jack Draper"
__email__ = "[email protected]"
__status__ = "Development"
# "Prototype", "Development", or "Production"
# Constants
CONFIG_PATH = "./configs/config.ini"
DEFAULT_EMBED = discord.Embed(
title=":desktop: Program Status",
colour=discord.Colour.blue()
)
# Checks for config file
if not os.path.exists("./configs/config.ini"):
print("No config file can be found in ./configs/.")
sys.exit("No config found.")
# Runs config file
config = configparser.ConfigParser()
try:
# config.read(os.path.abspath("./configs/config.ini"))
config.read_file(codecs.open(CONFIG_PATH, "r", "utf-8-sig"))
except FileNotFoundError:
try:
# shutil.copyfile("./configs/default_config.ini", "./configs/config.ini")
print("You need to set up the config file correctly.")
except shutil.Error:
print("Something is wrong with the default config file or the config folder.")
time.sleep(4)
sys.exit()
# Config Constants
ADMIN_ROLE = config["Credentials"]["admin_role"]
TEXT_CHANNEL = int(config["ProcessDisplay"]["text_channel_id"])
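# Note: eval() executes the config value as arbitrary Python code; for a plain dict
# literal, ast.literal_eval would be a safer choice.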
PROCESSES = eval(config["ProcessDisplay"]["processes"])
class ProcessDisplay(commands.Cog):
"""
The Cog for Process Display
"""
def __init__(self, client):
"""
:param client: the bot client parsed in from the main program
"""
self.started = False
self.client = client
self.inline = False
# Events
@commands.Cog.listener()
async def on_ready(self):
"""
        Runs when the bot has started up and is ready.
        Deletes previous bot messages in TEXT_CHANNEL and
        starts the find_processes loop.
:return:
"""
if not self.started:
channel = self.client.get_channel(TEXT_CHANNEL)
await self.delete_bot_msg(channel)
msg = await channel.send(embed=DEFAULT_EMBED)
self.find_processes.start(msg)
            self.started = True  # assign to the instance flag so this block only runs once
print("ProcessDisplay Running")
# Commands
@commands.command()
@commands.has_permissions(administrator=True)
    async def toggle_inline(self, ctx):
"""
Toggles inline for process controls
:param ctx: The command Context
:return:
"""
self.inline = not self.inline
@commands.command()
@commands.has_permissions(administrator=True)
async def move_process(self, direction, process_name):
"""
        Not yet implemented: intended to move a process up or down in the display order.
        :param direction: "up" or "down"
        :param process_name: the displayed name of the process to move
        :return:
"""
for i in range(len(PROCESSES)):
if PROCESSES[i] == process_name:
if direction.lower() == "up":
pass
@commands.command()
@commands.has_permissions(administrator=True)
async def add_process(self, ctx, process, name):
"""
Adds a process to the process display.
Must be different from ones currently displayed.
:param ctx: Context of the command
:param process: The process (e.g. 'cmd.exe') to be added
:param name: The name to be displayed for the process (e.g. 'Command Prompt')
:return:
"""
name = self.fix_emoji_escapes(name)
if process in PROCESSES.keys():
await ctx.send(f"The process {process} is already being displayed")
elif name in PROCESSES.values():
await ctx.send(f"The process name {name} is already being displayed")
else:
PROCESSES[process] = name
self.update_processes_config()
await ctx.send(f"The process {name} has been added")
@commands.command()
@commands.has_permissions(administrator=True)
async def remove_process(self, ctx, *name):
"""
Removes a process from the process display
:param ctx: Context of the command
:param name: Name displayed for the process (e.g. Command Prompt)
:return:
"""
print(name)
name = self.fix_emoji_escapes(" ".join(name))
complete = False
for process in PROCESSES.keys():
if PROCESSES.get(process) == name:
PROCESSES.pop(process)
self.update_processes_config()
await ctx.send(f"The process {name} has been removed")
complete = True
break
if not complete:
await ctx.send(f"The process {name} doesn't exist")
@commands.command()
@commands.has_permissions(administrator=True)
async def edit_process(self, ctx, old_name, new_name):
"""
Edits the name of a process
:param ctx: The context of the command
:param old_name: The old name of the process (to be changed)
:param new_name: The new name of the process (changed to)
:return:
"""
old_name = self.fix_emoji_escapes(old_name)
new_name = self.fix_emoji_escapes(new_name)
if old_name in PROCESSES.values():
for process in PROCESSES:
if PROCESSES.get(process) == old_name:
PROCESSES.update({process: new_name})
self.update_processes_config()
else:
await ctx.send(f"Process name {old_name} doesn't exist")
@tasks.loop(seconds=1)
async def find_processes(self, msg):
"""
The processes with statuses are attached to the msg given
:param msg: The message to be edited with the processes
:return:
"""
running_processes = []
new_embed = DEFAULT_EMBED.copy()
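        # Note: proc.name()/proc.cwd() can raise psutil.AccessDenied for protected
        # processes; the PermissionError handler below only wraps the embed-building loop.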
for proc in psutil.process_iter():
if proc.name() in PROCESSES.keys():
running_processes.append(proc.name())
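            # Java programs all report as java.exe/javaw.exe, so match them by
            # their working directory instead of the executable name.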
elif proc.name() in ["java.exe", "javaw.exe"] and proc.cwd() in PROCESSES.keys():
running_processes.append(proc.cwd())
for process in PROCESSES:
try:
if process in running_processes:
new_embed.add_field(name=PROCESSES.get(process),
value="Online <:GreenTick:592083498534174721>", inline=self.inline)
else:
new_embed.add_field(name=PROCESSES.get(process),
value="Offline <:RedCross:592082557961633877>", inline=self.inline)
except PermissionError:
new_embed.add_field(name=PROCESSES.get(process),
value="Admin Required <:OrangeUnknown:592082676891123722>", inline=self.inline)
await msg.edit(content="", embed=new_embed)
def is_me(self, m):
"""
        Checks whether a message's author is the bot itself
        :param m: the message to check
        :return: True if the message was sent by the bot
"""
return m.author == self.client.user
async def delete_bot_msg(self, channel):
"""
Deletes up to the last 100 messages sent by the bot in the given channel
:param channel: The channel that will have the messages deleted
        :return:
"""
await channel.purge(limit=100, check=self.is_me)
@staticmethod
def update_processes_config():
"""
Updates the processes line in the config with the current PROCESSES
:return:
"""
config.set("ProcessDisplay", "processes", str(PROCESSES))
with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:
config.write(configfile)
@staticmethod
def fix_emoji_escapes(text):
"""
Fixes emoji escapes to add the < back on
:param text: The text that needs to be checked for an escape
:return: the fixed text
"""
new_text = text.split(":")
for i in range(2, len(new_text)):
if ">" in new_text[i]:
new_text[i-2] += "<"
return ":".join(new_text)
def setup(client):
"""
    Run when the Cog is loaded; registers ProcessDisplay with the bot
:param client: The bot client
:return:
"""
client.add_cog(ProcessDisplay(client))
|
[
"#!/usr/bin/env python\n\n\"\"\"\nThis is a Cog used to display processes/ programs running on the client to a discord text channel\n\nCommented using reStructuredText (reST)\n\nToDo\n create and use a database for multiple servers\n\"\"\"\n# Futures\n\n# Built-in/Generic Imports\nimport os\nimport sys\nimport configparser\nimport shutil\nimport time\nimport codecs\n\n# Libs\nimport discord\nimport psutil\nfrom discord.ext import commands, tasks\n\n# Own modules\n\n__author__ = \"Jack Draper\"\n__copyright__ = \"Unofficial Copyright 2019, CyclopsBot\"\n__credits__ = [\"Jack Draper\"]\n__license__ = \"Developer\"\n__version__ = \"0.0.4\"\n__maintainer__ = \"Jack Draper\"\n__email__ = \"[email protected]\"\n__status__ = \"Development\"\n# \"Prototype\", \"Development\", or \"Production\"\n\n# Constants\nCONFIG_PATH = \"./configs/config.ini\"\nDEFAULT_EMBED = discord.Embed(\n title=\":desktop: Program Status\",\n colour=discord.Colour.blue()\n )\n\n\n# Checks for config file\nif not os.path.exists(\"./configs/config.ini\"):\n print(\"No config file can be found in ./configs/.\")\n sys.exit(\"No config found.\")\n# Runs config file\nconfig = configparser.ConfigParser()\ntry:\n # config.read(os.path.abspath(\"./configs/config.ini\"))\n config.read_file(codecs.open(CONFIG_PATH, \"r\", \"utf-8-sig\"))\nexcept FileNotFoundError:\n try:\n # shutil.copyfile(\"./configs/default_config.ini\", \"./configs/config.ini\")\n print(\"You need to set up the config file correctly.\")\n except shutil.Error:\n print(\"Something is wrong with the default config file or the config folder.\")\n time.sleep(4)\n sys.exit()\n\n# Config Constants\nADMIN_ROLE = config[\"Credentials\"][\"admin_role\"]\nTEXT_CHANNEL = int(config[\"ProcessDisplay\"][\"text_channel_id\"])\nPROCESSES = eval(config[\"ProcessDisplay\"][\"processes\"])\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n # Events\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print(\"ProcessDisplay Running\")\n\n # Commands\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self,ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == \"up\":\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f\"The process {process} is already being displayed\")\n elif name in PROCESSES.values():\n await ctx.send(f\"The process name {name} is already being displayed\")\n\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been added\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(\" \".join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been removed\")\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in [\"java.exe\", \"javaw.exe\"] and proc.cwd() in PROCESSES.keys():\n running_processes.append(proc.cwd())\n\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Online <:GreenTick:592083498534174721>\", inline=self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Offline <:RedCross:592082557961633877>\", inline=self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process),\n value=\"Admin Required <:OrangeUnknown:592082676891123722>\", inline=self.inline)\n await msg.edit(content=\"\", embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n\n config.set(\"ProcessDisplay\", \"processes\", str(PROCESSES))\n with 
open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(\":\")\n for i in range(2, len(new_text)):\n if \">\" in new_text[i]:\n new_text[i-2] += \"<\"\n return \":\".join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"<docstring token>\nimport os\nimport sys\nimport configparser\nimport shutil\nimport time\nimport codecs\nimport discord\nimport psutil\nfrom discord.ext import commands, tasks\n__author__ = 'Jack Draper'\n__copyright__ = 'Unofficial Copyright 2019, CyclopsBot'\n__credits__ = ['Jack Draper']\n__license__ = 'Developer'\n__version__ = '0.0.4'\n__maintainer__ = 'Jack Draper'\n__email__ = '[email protected]'\n__status__ = 'Development'\nCONFIG_PATH = './configs/config.ini'\nDEFAULT_EMBED = discord.Embed(title=':desktop: Program Status', colour=\n discord.Colour.blue())\nif not os.path.exists('./configs/config.ini'):\n print('No config file can be found in ./configs/.')\n sys.exit('No config found.')\nconfig = configparser.ConfigParser()\ntry:\n config.read_file(codecs.open(CONFIG_PATH, 'r', 'utf-8-sig'))\nexcept FileNotFoundError:\n try:\n print('You need to set up the config file correctly.')\n except shutil.Error:\n print(\n 'Something is wrong with the default config file or the config folder.'\n )\n time.sleep(4)\n sys.exit()\nADMIN_ROLE = config['Credentials']['admin_role']\nTEXT_CHANNEL = int(config['ProcessDisplay']['text_channel_id'])\nPROCESSES = eval(config['ProcessDisplay']['processes'])\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 
'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', 
encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"<docstring token>\n<import token>\n__author__ = 'Jack Draper'\n__copyright__ = 'Unofficial Copyright 2019, CyclopsBot'\n__credits__ = ['Jack Draper']\n__license__ = 'Developer'\n__version__ = '0.0.4'\n__maintainer__ = 'Jack Draper'\n__email__ = '[email protected]'\n__status__ = 'Development'\nCONFIG_PATH = './configs/config.ini'\nDEFAULT_EMBED = discord.Embed(title=':desktop: Program Status', colour=\n discord.Colour.blue())\nif not os.path.exists('./configs/config.ini'):\n print('No config file can be found in ./configs/.')\n sys.exit('No config found.')\nconfig = configparser.ConfigParser()\ntry:\n config.read_file(codecs.open(CONFIG_PATH, 'r', 'utf-8-sig'))\nexcept FileNotFoundError:\n try:\n print('You need to set up the config file correctly.')\n except shutil.Error:\n print(\n 'Something is wrong with the default config file or the config folder.'\n )\n time.sleep(4)\n sys.exit()\nADMIN_ROLE = config['Credentials']['admin_role']\nTEXT_CHANNEL = int(config['ProcessDisplay']['text_channel_id'])\nPROCESSES = eval(config['ProcessDisplay']['processes'])\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"<docstring token>\n<import token>\n<assignment token>\nif not os.path.exists('./configs/config.ini'):\n print('No config file can be found in ./configs/.')\n sys.exit('No config found.')\n<assignment token>\ntry:\n config.read_file(codecs.open(CONFIG_PATH, 'r', 'utf-8-sig'))\nexcept FileNotFoundError:\n try:\n print('You need to set up the config file correctly.')\n except shutil.Error:\n print(\n 'Something is wrong with the default config file or the config folder.'\n )\n time.sleep(4)\n sys.exit()\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\ndef setup(client):\n \"\"\"\n Ran on setup of the Cog\n :param client: The bot client\n :return:\n \"\"\"\n client.add_cog(ProcessDisplay(client))\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n \"\"\"\n The Cog for Process Display\n \"\"\"\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n <docstring token>\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n\n @staticmethod\n def update_processes_config():\n \"\"\"\n Updates the processes line in the config with the current PROCESSES\n :return:\n \"\"\"\n config.set('ProcessDisplay', 'processes', str(PROCESSES))\n with open(CONFIG_PATH, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n <docstring token>\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n <function token>\n\n @staticmethod\n def fix_emoji_escapes(text):\n \"\"\"\n Fixes emoji escapes to add the < back on\n :param text: The text that needs to be checked for an escape\n :return: the fixed text\n \"\"\"\n new_text = text.split(':')\n for i in range(2, len(new_text)):\n if '>' in new_text[i]:\n new_text[i - 2] += '<'\n return ':'.join(new_text)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n <docstring token>\n\n def __init__(self, client):\n \"\"\"\n :param client: the bot client parsed in from the main program\n \"\"\"\n self.started = False\n self.client = client\n self.inline = False\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n <function token>\n <function token>\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n <docstring token>\n <function token>\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n\n def is_me(self, m):\n \"\"\"\n Checks if a messages author is the bot\n :param m: tbh idk, maybe message?\n :return:\n \"\"\"\n return m.author == self.client.user\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n <function token>\n <function token>\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass ProcessDisplay(commands.Cog):\n <docstring token>\n <function token>\n\n @commands.Cog.listener()\n async def on_ready(self):\n \"\"\"\n Ran when bot is starting up and ready\n Deletes messages from the bot in the TEXTCHANNEL\n starts up find_processes method\n :return:\n \"\"\"\n if not self.started:\n channel = self.client.get_channel(TEXT_CHANNEL)\n await self.delete_bot_msg(channel)\n msg = await channel.send(embed=DEFAULT_EMBED)\n self.find_processes.start(msg)\n started = True\n print('ProcessDisplay Running')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def toggle_inline(self, ctx):\n \"\"\"\n Toggles inline for process controls\n\n :param ctx: The command Context\n :return:\n \"\"\"\n self.inline = not self.inline\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def move_process(self, direction, process_name):\n \"\"\"\n need to make\n :param direction:\n :param process_name:\n :return:\n \"\"\"\n for i in range(len(PROCESSES)):\n if PROCESSES[i] == process_name:\n if direction.lower() == 'up':\n pass\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def add_process(self, ctx, process, name):\n \"\"\"\n Adds a process to the process display.\n Must be different from ones currently displayed.\n\n :param ctx: Context of the command\n :param process: The process (e.g. 'cmd.exe') to be added\n :param name: The name to be displayed for the process (e.g. 'Command Prompt')\n :return:\n \"\"\"\n name = self.fix_emoji_escapes(name)\n if process in PROCESSES.keys():\n await ctx.send(f'The process {process} is already being displayed')\n elif name in PROCESSES.values():\n await ctx.send(\n f'The process name {name} is already being displayed')\n else:\n PROCESSES[process] = name\n self.update_processes_config()\n await ctx.send(f'The process {name} has been added')\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def remove_process(self, ctx, *name):\n \"\"\"\n Removes a process from the process display\n\n :param ctx: Context of the command\n :param name: Name displayed for the process (e.g. 
Command Prompt)\n :return:\n \"\"\"\n print(name)\n name = self.fix_emoji_escapes(' '.join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f'The process {name} has been removed')\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")\n\n @commands.command()\n @commands.has_permissions(administrator=True)\n async def edit_process(self, ctx, old_name, new_name):\n \"\"\"\n Edits the name of a process\n :param ctx: The context of the command\n :param old_name: The old name of the process (to be changed)\n :param new_name: The new name of the process (changed to)\n :return:\n \"\"\"\n old_name = self.fix_emoji_escapes(old_name)\n new_name = self.fix_emoji_escapes(new_name)\n if old_name in PROCESSES.values():\n for process in PROCESSES:\n if PROCESSES.get(process) == old_name:\n PROCESSES.update({process: new_name})\n self.update_processes_config()\n else:\n await ctx.send(f\"Process name {old_name} doesn't exist\")\n\n @tasks.loop(seconds=1)\n async def find_processes(self, msg):\n \"\"\"\n The processes with statuses are attached to the msg given\n\n :param msg: The message to be edited with the processes\n :return:\n \"\"\"\n running_processes = []\n new_embed = DEFAULT_EMBED.copy()\n for proc in psutil.process_iter():\n if proc.name() in PROCESSES.keys():\n running_processes.append(proc.name())\n elif proc.name() in ['java.exe', 'javaw.exe'] and proc.cwd(\n ) in PROCESSES.keys():\n running_processes.append(proc.cwd())\n for process in PROCESSES:\n try:\n if process in running_processes:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Online <:GreenTick:592083498534174721>', inline=\n self.inline)\n else:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Offline <:RedCross:592082557961633877>', inline=\n self.inline)\n except PermissionError:\n new_embed.add_field(name=PROCESSES.get(process), value=\n 'Admin Required <:OrangeUnknown:592082676891123722>',\n inline=self.inline)\n await msg.edit(content='', embed=new_embed)\n <function token>\n\n async def delete_bot_msg(self, channel):\n \"\"\"\n Deletes up to the last 100 messages sent by the bot in the given channel\n :param channel: The channel that will have the messages deleted\n :return: the message that says how many messages were deleted\n \"\"\"\n await channel.purge(limit=100, check=self.is_me)\n <function token>\n <function token>\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n"
] | false |
521 |
9c09309d23510aee4409a6d9021c2991afd2d349
|
################################################################################
#
# titleStrip.py
#
#	Generates an output file that is a copy of the input with matching title lines stripped out
# Usage:
# python titleStrip.py [input filename] [output filename]
#
################################################################################
import os, sys
# Globals / Settings
strip_target = ['Wizards of', 'Random Generator'] # Keys for removal between input and output
output_name = 'titleStrip_out.txt'						# default output filename is titleStrip_out.txt
# There will be a better home for this, mhm...
def clearConsole ():
os.system ('cls' if os.name == 'nt' else 'clear')
def main():
checkArgs()
# Open up the input / output files (read / write modes respectively)
rfile = open (sys.argv[1], 'r')
wfile = open (output_name, 'w')
parseAndStrip (rfile, wfile)
# Close the input / output files now that we are done
rfile.close()
wfile.close()
# checkArgs
# 1. Verifies that the number of arguments is acceptable
# 2. Reads in optional output filename
def checkArgs ():
# Verify number of input arguments
if len (sys.argv) < 2 or len (sys.argv) > 3:
print ("Usage Error:\t\tThe program needs (at least) an input filename to run.")
print ("Correct Usage:\t\tpython titleStrip.py [input filename]")
print ("Alternate Usage:\t\tpython titleStrip.py [input filename] [output filename]")
sys.exit(1)
# Read in optional output filename if any
if len (sys.argv) == 3:
global output_name # Use the global output_name
output_name = sys.argv [2] # Set the name
# parseAndStrip
# Reads through rfile and copies lines into wfile
# If we find a line to remove, we do not copy it into wfile
def parseAndStrip ( rfile, wfile ):
while True:
line = rfile.readline() # read in a line
if not line: return # leave this function if we are done
# Check to see if line has a key for removal
skip = 0
for key in strip_target:
if key in line:
skip = 1
# Only copy from rfile to wfile if skip == 0
if skip == 0:
wfile.write (line)
main()
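# A minimal usage sketch (the filenames below are hypothetical, not part of the script):
#
#   python titleStrip.py raw_titles.txt cleaned_titles.txt
#
# copies each line of raw_titles.txt into cleaned_titles.txt, skipping any line that
# contains one of the strip_target keys ('Wizards of' or 'Random Generator'). If the
# second argument is omitted, output goes to titleStrip_out.txt.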
|
[
"################################################################################\n#\n#\ttitleStrip.py\n#\n#\tGenerates an output file with the titles of the input stripped\n#\tUsage:\n#\t\tpython titleStrip.py [input filename] [output filename]\n#\n################################################################################\n\nimport os, sys\n\n# Globals / Settings\nstrip_target = ['Wizards of', 'Random Generator']\t\t# Keys for removal between input and output\noutput_name = 'titleStrip_out.txt'\t\t\t\t\t\t# default output filename is out.txt\n\n# There will be a better home for this, mhm...\ndef clearConsole ():\n\tos.system ('cls' if os.name == 'nt' else 'clear')\n\t\ndef main():\n\tcheckArgs()\n\t\n\t# Open up the input / output files (read / write modes respectively)\n\trfile = open (sys.argv[1], 'r')\n\twfile = open (output_name, 'w')\n\n\tparseAndStrip (rfile, wfile) \n\t\n\t# Close the input / output files now that we are done\n\trfile.close()\n\twfile.close()\n\n# checkArgs\n#\t1. Verifies that the number of arguments is acceptable\n#\t2. Reads in optional output filename\ndef checkArgs ():\n\t# Verify number of input arguments\n\tif len (sys.argv) < 2 or len (sys.argv) > 3:\n\t\tprint (\"Usage Error:\\t\\tThe program needs (at least) an input filename to run.\")\n\t\tprint (\"Correct Usage:\\t\\tpython titleStrip.py [input filename]\")\n\t\tprint (\"Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]\")\n\t\tsys.exit(1)\n\t\n\t# Read in optional output filename if any\n\tif len (sys.argv) == 3:\n\t\tglobal output_name\t\t\t\t# Use the global output_name\n\t\toutput_name = sys.argv [2]\t\t# Set the name\n\n# parseAndStrip\n#\tReads through rfile and copies lines into wfile\n#\tIf we find a line to remove, we do not copy it into wfile\t\t\ndef parseAndStrip ( rfile, wfile ):\n\twhile True:\n\t\tline = rfile.readline()\t\t# read in a line\n\t\tif not line: return\t\t\t# leave this function if we are done\n\t\t\n\t\t# Check to see if line has a key for removal\n\t\tskip = 0\n\t\tfor key in strip_target:\n\t\t\tif key in line:\n\t\t\t\tskip = 1\n\t\t\n\t\t# Only copy from rfile to wfile if skip == 0\n\t\tif skip == 0:\n\t\t\twfile.write (line)\n\nmain()\n\t",
"import os, sys\nstrip_target = ['Wizards of', 'Random Generator']\noutput_name = 'titleStrip_out.txt'\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"<import token>\nstrip_target = ['Wizards of', 'Random Generator']\noutput_name = 'titleStrip_out.txt'\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"<import token>\n<assignment token>\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\nmain()\n",
"<import token>\n<assignment token>\n\n\ndef clearConsole():\n os.system('cls' if os.name == 'nt' else 'clear')\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef main():\n checkArgs()\n rfile = open(sys.argv[1], 'r')\n wfile = open(output_name, 'w')\n parseAndStrip(rfile, wfile)\n rfile.close()\n wfile.close()\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef checkArgs():\n if len(sys.argv) < 2 or len(sys.argv) > 3:\n print(\n 'Usage Error:\\t\\tThe program needs (at least) an input filename to run.'\n )\n print('Correct Usage:\\t\\tpython titleStrip.py [input filename]')\n print(\n 'Alternate Usage:\\t\\tpython titleStrip.py [input filename] [output filename]'\n )\n sys.exit(1)\n if len(sys.argv) == 3:\n global output_name\n output_name = sys.argv[2]\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef parseAndStrip(rfile, wfile):\n while True:\n line = rfile.readline()\n if not line:\n return\n skip = 0\n for key in strip_target:\n if key in line:\n skip = 1\n if skip == 0:\n wfile.write(line)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
522 |
8f3abc5beaded94b6d7b93ac2cfcd12145d75fe8
|
class Meta(type):
def __new__(meta, name, bases, class_dict):
print(f'* Running {meta}.__new__ for {name}')
print("Bases:", bases)
print(class_dict)
return type.__new__(meta, name, bases, class_dict)
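# Meta.__new__ runs once per class statement that uses this metaclass, so each
# class definition below prints its name, bases, and class dict as it is created.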
class MyClass(metaclass=Meta):
stuff = 123
def foo(self):
pass
class MySubClass(MyClass):
ofther = 456
def bar(self):
pass
print("")
class MyClass2:
stuff = 123
def __init_subclass__(cls):
super().__init_subclass__()
print(f'* Running {cls.__name__}.__init_subclass__')
print(cls.__dict__)
print(cls.super().__dict__)
def foo(self):
pass
class MySubClass2(MyClass2):
ofther = 456
def bar(self):
pass
|
[
"class Meta(type):\n def __new__(meta, name, bases, class_dict):\n print(f'* Running {meta}.__new__ for {name}')\n print(\"Bases:\", bases)\n print(class_dict)\n return type.__new__(meta, name, bases, class_dict)\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\nprint(\"\")\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n\n def foo(self):\n pass\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"class Meta(type):\n\n def __new__(meta, name, bases, class_dict):\n print(f'* Running {meta}.__new__ for {name}')\n print('Bases:', bases)\n print(class_dict)\n return type.__new__(meta, name, bases, class_dict)\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\nprint('')\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"class Meta(type):\n\n def __new__(meta, name, bases, class_dict):\n print(f'* Running {meta}.__new__ for {name}')\n print('Bases:', bases)\n print(class_dict)\n return type.__new__(meta, name, bases, class_dict)\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"class Meta(type):\n <function token>\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n\n\nclass MyClass(metaclass=Meta):\n stuff = 123\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n\n\nclass MyClass(metaclass=Meta):\n <assignment token>\n\n def foo(self):\n pass\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n\n\nclass MyClass(metaclass=Meta):\n <assignment token>\n <function token>\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n\n\nclass MySubClass(MyClass):\n ofther = 456\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n\n\nclass MySubClass(MyClass):\n <assignment token>\n\n def bar(self):\n pass\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n\n\nclass MySubClass(MyClass):\n <assignment token>\n <function token>\n\n\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n\n\nclass MyClass2:\n stuff = 123\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n\n\nclass MyClass2:\n <assignment token>\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n\n def foo(self):\n pass\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n\n\nclass MyClass2:\n <assignment token>\n\n def __init_subclass__(cls):\n super().__init_subclass__()\n print(f'* Running {cls.__name__}.__init_subclass__')\n print(cls.__dict__)\n print(cls.super().__dict__)\n <function token>\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n\n\nclass MyClass2:\n <assignment token>\n <function token>\n <function token>\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n<class token>\n\n\nclass MySubClass2(MyClass2):\n ofther = 456\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n<class token>\n\n\nclass MySubClass2(MyClass2):\n <assignment token>\n\n def bar(self):\n pass\n",
"<class token>\n<class token>\n<class token>\n<code token>\n<class token>\n\n\nclass MySubClass2(MyClass2):\n <assignment token>\n <function token>\n",
"<class token>\n<class token>\n<class token>\n<code token>\n<class token>\n<class token>\n"
] | false |
523 |
6801d68ebcc6ff52d9be92efeeb8727997a14bbd
|
#!/usr/bin/env python
import json
import requests
from requests.auth import HTTPBasicAuth
if __name__ == "__main__":
auth = HTTPBasicAuth('cisco', 'cisco')
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json'
}
url = "https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0"
body = {
"kind": "object#GigabitInterface",
"interfaceDesc": "Configured by Python"
}
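    # verify=False skips TLS certificate checks (typical for a lab ASA with a
    # self-signed certificate), and disable_warnings() silences the resulting
    # urllib3 insecure-request warnings; avoid this against production devices.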
requests.packages.urllib3.disable_warnings()
response = requests.patch(url, data=json.dumps(body), auth=auth, headers=headers, verify=False)
|
[
"#!/usr/bin/env python\n\nimport json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\nif __name__ == \"__main__\":\n\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n\n url = \"https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0\"\n\n body = {\n \"kind\": \"object#GigabitInterface\",\n \"interfaceDesc\": \"Configured by Python\"\n }\n\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth, headers=headers, verify=False)\n\n\n\n\n",
"import json\nimport requests\nfrom requests.auth import HTTPBasicAuth\nif __name__ == '__main__':\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n url = 'https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0'\n body = {'kind': 'object#GigabitInterface', 'interfaceDesc':\n 'Configured by Python'}\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth,\n headers=headers, verify=False)\n",
"<import token>\nif __name__ == '__main__':\n auth = HTTPBasicAuth('cisco', 'cisco')\n headers = {'Accept': 'application/json', 'Content-Type': 'application/json'\n }\n url = 'https://asav/api/interfaces/physical/GigabitEthernet0_API_SLASH_0'\n body = {'kind': 'object#GigabitInterface', 'interfaceDesc':\n 'Configured by Python'}\n requests.packages.urllib3.disable_warnings()\n response = requests.patch(url, data=json.dumps(body), auth=auth,\n headers=headers, verify=False)\n",
"<import token>\n<code token>\n"
] | false |
524 |
6e3bb17696953256af6d8194128427acebf1daac
|
from random import randint, shuffle
class Generator:
opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']
@staticmethod
def generate(level):
"""
根据 level 生成指定等级的算术题
0:小学;1:初中;2:高中
"""
"""
生成操作数序列以及二元运算符序列
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
生成二元运算符的位置
"""
remain = 1
position = []
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
生成一元运算符序列
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(
Generator.opset[randint(4, 5 if level == 1 else 8)])
shuffle(op1Arr)
"""
生成后缀表达式
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
返回运算符或操作数的优先级
操作数:0
一元运算符:1
'*'、'/':2
'+'、'-':3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
转换成中缀表达式
stack 存储 (expression, priority)
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
是一个操作数,直接入栈
"""
stack.append((e, 0))
elif priority == 3:
"""
是加/减运算,优先级最低,拼接后直接入栈
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
是乘/除运算,如果有加/减运算需要加括号
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
是一元运算,除了操作数都要加括号
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
|
[
"from random import randint, shuffle\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(\n Generator.opset[randint(4, 5 if level == 1 else 8)])\n shuffle(op1Arr)\n\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n\n return stack[0][0]\n",
"from random import randint, shuffle\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"<import token>\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"<import token>\n\n\nclass Generator:\n <assignment token>\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"<import token>\n\n\nclass Generator:\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
525 |
297b2ff6c6022bd8aac09c25537a132f67e05174
|
from PIL import Image
source = Image.open("map4.png")
img = source.load()
map_data = {}
curr_x = 1
curr_y = 1
#Go over each chunk and get the pixel info
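#Chunk keys are the 1-based top-left coordinates joined with a capital X (e.g. "1X1",
#"11X1"); each chunk maps 100 per-pixel keys like "3x7" to that pixel's value.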
for x in range(0, 100, 10):
curr_x = x+1
for y in range(0, 100, 10):
curr_y = y+1
chunk = str(curr_x)+"X"+str(curr_y)
if chunk not in map_data:
map_data[chunk] = {}
for j in range(0, 10):
for k in range(0, 10):
loc = str(curr_x+j)+"x"+str(curr_y+k)
map_data[chunk][loc] = img[x+j, y+k]
#print map_data.keys()
#print map_data["1X1"]
print len(map_data.keys())
print len(map_data["1X1"].keys())
|
[
"from PIL import Image\n\nsource = Image.open(\"map4.png\")\nimg = source.load()\n\nmap_data = {}\n\ncurr_x = 1\ncurr_y = 1\n#Go over each chunk and get the pixel info\nfor x in range(0, 100, 10):\n\tcurr_x = x+1\n\tfor y in range(0, 100, 10):\n\t\tcurr_y = y+1\n\t\tchunk = str(curr_x)+\"X\"+str(curr_y)\n\t\tif chunk not in map_data:\n\t\t\tmap_data[chunk] = {}\n\t\tfor j in range(0, 10):\n\t\t\tfor k in range(0, 10):\n\t\t\t\tloc = str(curr_x+j)+\"x\"+str(curr_y+k)\n\t\t\t\tmap_data[chunk][loc] = img[x+j, y+k]\n#print map_data.keys()\n#print map_data[\"1X1\"]\nprint len(map_data.keys())\nprint len(map_data[\"1X1\"].keys())\n"
] | true |
526 |
83117000f5f34490cb14580a9867b1e871ccc2ae
|
from services.BureauActif.libbureauactif.db.Base import db, BaseModel
class BureauActifCalendarDataType(db.Model, BaseModel):
__tablename__ = "ba_calendar_data_type"
id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,
autoincrement=True)
name = db.Column(db.String, nullable=False)
def to_json(self, ignore_fields=None, minimal=False):
if ignore_fields is None:
ignore_fields = []
return super().to_json(ignore_fields=ignore_fields)
@staticmethod
def create_defaults():
data = BureauActifCalendarDataType()
data.name = 'seating'
db.session.add(data)
data2 = BureauActifCalendarDataType()
data2.name = 'standing'
db.session.add(data2)
data3 = BureauActifCalendarDataType()
data3.name = 'positionChanges'
db.session.add(data3)
data4 = BureauActifCalendarDataType()
data4.name = 'absent'
db.session.add(data4)
db.session.commit()
|
[
"from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = \"ba_calendar_data_type\"\n id_calendar_data_type = db.Column(db.Integer, db.Sequence('id_calendar_data_type_sequence'), primary_key=True,\n autoincrement=True)\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n\n db.session.commit()\n",
"from services.BureauActif.libbureauactif.db.Base import db, BaseModel\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"<import token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n __tablename__ = 'ba_calendar_data_type'\n id_calendar_data_type = db.Column(db.Integer, db.Sequence(\n 'id_calendar_data_type_sequence'), primary_key=True, autoincrement=True\n )\n name = db.Column(db.String, nullable=False)\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"<import token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n\n @staticmethod\n def create_defaults():\n data = BureauActifCalendarDataType()\n data.name = 'seating'\n db.session.add(data)\n data2 = BureauActifCalendarDataType()\n data2.name = 'standing'\n db.session.add(data2)\n data3 = BureauActifCalendarDataType()\n data3.name = 'positionChanges'\n db.session.add(data3)\n data4 = BureauActifCalendarDataType()\n data4.name = 'absent'\n db.session.add(data4)\n db.session.commit()\n",
"<import token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def to_json(self, ignore_fields=None, minimal=False):\n if ignore_fields is None:\n ignore_fields = []\n return super().to_json(ignore_fields=ignore_fields)\n <function token>\n",
"<import token>\n\n\nclass BureauActifCalendarDataType(db.Model, BaseModel):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
527 |
639669174435492f43bf51680c2724863017e9d2
|
import helpers
import os
import os.path
import json
import imp
import source.freesprints
from pygame.locals import *
class PluginLoader:
available_plugins = None
def __init__(self):
self.checkAvailablePlugins()
def checkAvailablePlugins(self):
print helpers.pluginsPath()
plugin_dirs = [plugin_path for plugin_path in os.listdir(helpers.pluginsPath())] #if os.path.isdir(f)]
self.available_plugins = []
for plugin_path in plugin_dirs:
print plugin_path
if plugin_path == ".DS_Store":
continue
plugin_path_absolute = os.path.join(helpers.pluginsPath(), plugin_path)
json_info_path = os.path.join(plugin_path_absolute, "info.json")
json_info_data = open(json_info_path)
jsonInfo = json.load(json_info_data)
self.available_plugins.append(Plugin(jsonInfo, plugin_path_absolute))
def getAvailablePlugins(self):
return self.available_plugins
class Plugin:
path = None
name = None
version = None
author = None
module = None
plugin_object = None
def __init__(self, info_json, path):
self.path = path
self.name = info_json.get("name")
self.version = info_json.get("version")
self.author = info_json.get("author")
print info_json
self.init_module()
def init_module(self):
#module = imp.find_module("pluginModule", [self.path])
self.module = imp.load_source("pluginModule", os.path.join(self.path, "__init__.py"))
print "FIND MODULE:"
print self.module
self.plugin_object = self.module.VisualisationPlugin(source.freesprints.get_app(), self)
#self.plugin_object.start()
#self.plugin_object.spinCount(123, 0)
def start(self, race_options):
source.freesprints.get_app().get_window_surface().fill(Color("black"))
self.plugin_object.start(race_options)
|
[
"import helpers\nimport os\nimport os.path\nimport json\nimport imp\nimport source.freesprints\nfrom pygame.locals import *\n\nclass PluginLoader:\n available_plugins = None\n \n def __init__(self):\n self.checkAvailablePlugins()\n \n def checkAvailablePlugins(self):\n print helpers.pluginsPath()\n \n plugin_dirs = [plugin_path for plugin_path in os.listdir(helpers.pluginsPath())] #if os.path.isdir(f)]\n \n self.available_plugins = []\n \n for plugin_path in plugin_dirs:\n print plugin_path\n if plugin_path == \".DS_Store\":\n continue\n \n plugin_path_absolute = os.path.join(helpers.pluginsPath(), plugin_path)\n json_info_path = os.path.join(plugin_path_absolute, \"info.json\")\n \n json_info_data = open(json_info_path)\n \n jsonInfo = json.load(json_info_data)\n \n self.available_plugins.append(Plugin(jsonInfo, plugin_path_absolute))\n \n \n \n def getAvailablePlugins(self):\n return self.available_plugins\n \nclass Plugin:\n path = None\n name = None\n version = None\n author = None\n \n module = None\n plugin_object = None\n \n def __init__(self, info_json, path):\n self.path = path\n \n self.name = info_json.get(\"name\")\n self.version = info_json.get(\"version\")\n self.author = info_json.get(\"author\")\n \n print info_json\n \n self.init_module()\n \n def init_module(self):\n #module = imp.find_module(\"pluginModule\", [self.path])\n self.module = imp.load_source(\"pluginModule\", os.path.join(self.path, \"__init__.py\"))\n \n print \"FIND MODULE:\"\n print self.module\n\n self.plugin_object = self.module.VisualisationPlugin(source.freesprints.get_app(), self)\n #self.plugin_object.start()\n #self.plugin_object.spinCount(123, 0)\n\n def start(self, race_options):\n source.freesprints.get_app().get_window_surface().fill(Color(\"black\"))\n self.plugin_object.start(race_options)\n\n\n\n"
] | true |
528 |
4462fec6e0edc25530c93ffeeae2372c86fef2cc
|
import numpy as np
import imutils
import cv2
image = cv2.imread("D:\\Github\\python-opencv\\images\\trex.png")
cv2.imshow("Original", image)
cv2.waitKey(0)
(h, w) = image.shape[:2] # get height and width of the image
center = (w/2, h/2) # which point to rotate around
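# getRotationMatrix2D(center, angle, scale) builds a 2x3 affine matrix;
# positive angles rotate counter-clockwise, and scale=1.0 keeps the original size.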
M = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix
rotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation
cv2.imshow("Rotated by 45 degrees", rotated)
cv2.waitKey(0)
M = cv2.getRotationMatrix2D(center, -90, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
cv2.imshow("Rotated by -90 degrees", rotated)
cv2.waitKey(0)
rotated = imutils.rotate(image, 180)
cv2.imshow("Rotated by 180", rotated)
cv2.waitKey(0)
|
[
"import numpy as np\nimport imutils\nimport cv2\n\nimage = cv2.imread(\"D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png\")\ncv2.imshow(\"Original\", image)\ncv2.waitKey(0)\n\n(h, w) = image.shape[:2] # get height and width of the image\ncenter = (w/2, h/2) # which point to rotate around\n\nM = cv2.getRotationMatrix2D(center, 45, 1.0) # rotation matrix\nrotated = cv2.warpAffine(image, M, (w, h)) # apply the rotation\ncv2. imshow(\"Rotated by 45 degrees\", rotated)\ncv2.waitKey(0)\n\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow(\"Rotated by -90 degrees\", rotated)\ncv2.waitKey(0)\n\nrotated = imutils.rotate(image, 180)\ncv2.imshow(\"Rotated by 180\", rotated)\ncv2.waitKey(0)",
"import numpy as np\nimport imutils\nimport cv2\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"<import token>\nimage = cv2.imread('D:\\\\Github\\\\python-opencv\\\\images\\\\trex.png')\ncv2.imshow('Original', image)\ncv2.waitKey(0)\nh, w = image.shape[:2]\ncenter = w / 2, h / 2\nM = cv2.getRotationMatrix2D(center, 45, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\nM = cv2.getRotationMatrix2D(center, -90, 1.0)\nrotated = cv2.warpAffine(image, M, (w, h))\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\nrotated = imutils.rotate(image, 180)\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"<import token>\n<assignment token>\ncv2.imshow('Original', image)\ncv2.waitKey(0)\n<assignment token>\ncv2.imshow('Rotated by 45 degrees', rotated)\ncv2.waitKey(0)\n<assignment token>\ncv2.imshow('Rotated by -90 degrees', rotated)\ncv2.waitKey(0)\n<assignment token>\ncv2.imshow('Rotated by 180', rotated)\ncv2.waitKey(0)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
529 |
934921b22d036bd611134ce74f6eba3a2710018e
|
import cv2
import numpy as np
result=cv2.VideoCapture(0)
while True:
ret,square=result.read()
area=square[100:200,100:200]
cv2.imshow("video",square)
cv2.imshow("video2",area)
print(square)
if cv2.waitKey(25) & 0xff == ord('q'):
break
result.release()
cv2.destroyAllWindows()
|
[
"import cv2\nimport numpy as np\n\n\nresult=cv2.VideoCapture(0)\n\nwhile True:\n ret,square=result.read()\n area=square[100:200,100:200]\n cv2.imshow(\"video\",square)\n cv2.imshow(\"video2\",area)\n print(square)\n\n if cv2.waitKey(25) & 0xff == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"import cv2\nimport numpy as np\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"<import token>\nresult = cv2.VideoCapture(0)\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"<import token>\n<assignment token>\nwhile True:\n ret, square = result.read()\n area = square[100:200, 100:200]\n cv2.imshow('video', square)\n cv2.imshow('video2', area)\n print(square)\n if cv2.waitKey(25) & 255 == ord('q'):\n break\nresult.release()\ncv2.destroyAllWindows()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
530 |
76d166bc227986863db77aa784be3de8110437ff
|
import logging, numpy as np, time, pandas as pd
from abc import abstractmethod
from kombu import binding
from tqdm import tqdm
from functools import lru_cache
from threading import Thread
from math import ceil
from copy import copy
from .pos import Position
from .base import BaseConsumer
from .event import SignalEventPct, OrderEvent
from .conf import LONG, SHORT, EXIT, MKT, BUY, SELL, LOCAL_TZ
from .util import clean_timestamp
from .errors import OverFilling
logger = logging.getLogger('Strategy')
class BaseStrategy(BaseConsumer):
"""Strategy is an abstract base class providing an interface for
all subsequent (inherited) strategy handling objects.
Goal
----
The goal of a (derived) Strategy object
    - based on the inbound 'Tick', calculate signals
- 'Signal' is at the symbol level which will be published
Note
----
This is designed to work both with historic and live data as
the Strategy object is agnostic to the data source,
    since it obtains the 'Tick' object from the MarketEvent message
"""
def __init__(
self, symbol_list, allocation, freq, positions,
start, end, warmup=0, fixed_allocation=True,
batch_size=10000
):
"""
Parameter:
----------
symbol_list (list): A list of Contract perm_tick (for data)
allocation (float): Dollar amount that this strategy is able to use
freq (conf.FREQ): Data Frequency type for this strategy (for data)
positions (dict of dict):
A dictionary with perm_tick and a dictionary of arguments
- pct_portfolio (float): percentage of the allocation
- rebalance (int): # of days to rebalance to pct_portfolio
- hard_stop (float): hard drawdown gate to close position
        warmup (int): # of days to warm up the strategy
        env_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}
            which environment to run the strategy in
        start, end (datetime):
            Only for backtesting, to specify the range of data to test
"""
n = ceil(freq.one_day)
num_pos = len(positions)
        # getting necessary parameters
self.symbol_list = symbol_list
self.freq = freq
self.warmup = warmup * n
if start:
self.start_dt = clean_timestamp(start)
if end:
self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1, days=1)
# allocation parameters for tracking portfolio
self.allocation = allocation
self.cash = allocation
self.commission = 0
self.fixed_allocation = fixed_allocation
pos_dict = {}
for perm_tick, v in positions.items():
            # to hold a position in a symbol, the strategy must also receive its market ticks
if perm_tick not in self.symbol_list:
self.symbol_list.append(perm_tick)
pos = Position(
perm_tick,
pct_portfolio=v.get('pct_portfolio', 1/num_pos),
rebalance=v.get('rebalance', 0) * n,
hard_stop=v.get('hard_stop', 0),
)
pos_dict[perm_tick] = pos
self.pos = pos_dict
        # t starts at 0 and increments itself on every market tick
self.t = 0
self._hist = []
self.batch_size = batch_size
super().__init__(comp_type='STGY', required=['feed', 'exe'])
@abstractmethod
def calculate_signals(self):
"""Provide the mechanism to calculate a list of signals"""
raise NotImplementedError(
"Should implement calculate_signals()\n" + \
"By calling this method to calculate 'Signal' Events"
)
def subscriptions(self):
return [
('ack-reg-feed', self.id, self.on_ack_reg_feed),
('ack-dereg_feed', self.id, self.on_ack_dereg_feed),
('ack-reg-exe', self.id, self.on_ack_reg_exe),
('ack-dereg-exe', self.id, self.on_ack_dereg_exe),
('eod', self.id, self.on_eod),
('tick', self.id, self.on_market),
('fill', self.id, self.on_fill),
]
def update_data(self, ticks):
pass
def on_hard_stop(self, symbol):
pass
def on_rebalance(self, symbol):
pass
def has_position(self, symbol):
return self.pos[symbol].has_position
def has_open_orders(self, symbol):
return self.pos[symbol].has_open_orders
def has_long(self, symbol):
return self.pos[symbol].has_long
def has_short(self, symbol):
return self.pos[symbol].has_short
@property
def nav(self):
"""Net Account Value / Net Liquidating Value"""
return sum(pos.mv for pos in self.pos.values()) + self.cash
@property
def total_cost(self):
return sum(pos.cost for pos in self.pos.values())
@property
def total_bp(self):
if self.fixed_allocation:
return self.allocation
else:
return self.nav
@property
def avaliable_bp(self):
return self.total_bp - self.total_cost
def start(self):
while self.status != 'RUNNING':
time.sleep(2)
# setting up progress bar
self._pbar = tqdm(
total=int(np.ceil(
pd.bdate_range(self.start_dt, self.end_dt).size
* np.ceil(self.freq.one_day)
)),
miniters=int(np.ceil(self.freq.one_day)),
unit=' tick<{}>'.format(self.freq.value),
)
# publish event to get started
logger.info('Warming up Strategy')
self.basic_publish('warmup', sender=self.id)
logger.info('Really Starting up calculating Signals')
self.basic_publish('next', sender=self.id)
def on_ack_reg_feed(self, oid, body):
self.required['feed'] = True
def on_ack_reg_exe(self, oid, body):
self.required['exe'] = True
def on_ack_dereg_feed(self, oid, body):
self.required['feed'] = False
def on_ack_dereg_exe(self, oid, body):
self.required['exe'] = False
def on_eod(self, oid, body):
"""Handlering End of Data Event"""
self._pbar.update(self._pbar.total - self._pbar.n)
self._pbar.close()
self.basic_publish('dereg-feed', sender=self.id)
self.basic_publish('dereg-exe', sender=self.id)
self._stop()
def on_fill(self, oid, body):
"""Upon filled order
        - update the strategy's position and spot any position reversion
- update holding time
- update position quantity
Parameter:
----------
fill (Fill Event)
"""
logger.info('Consuming filled Order')
fill = body['fill']
# update the position first
self.pos[fill.symbol].on_fill(fill)
# getting data from the fill event
Q = fill.quantity
K, D, C = fill.fill_cost, fill.fill_type, fill.commission
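        # Assumption: fill_type (BUY/SELL from .conf) carries a signed .value
        # (+1 buy, -1 sell), so buys reduce cash and sells add the proceeds back.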
cost = D.value * K * Q
self.commission += C
self.cash -= cost + C
def on_market(self, oid, body):
"""On market event
        - update information for each existing position
        - generate orders for rebalancing()
        - the strategy will calculate signal(s)
        - and publish them to the exchange for processing
        - then a "done" will be published to indicate
            the strategy has finished doing everything this heartbeat
        - so then the risk manager will collect all signals
            before sending orders for execution
Parameter:
----------
ticks (Market Event)
"""
if body['freq'] != self.freq: return
ticks = body['ticks']
self._update_data(ticks)
if self.t >= self.warmup:
self._calculate_signals()
# publish generated signals
equity = self.total_bp
            bp = copy(self.avaliable_bp) # current snapshot of buying power
for S, pos in self.pos.items():
for order, lvl in pos.generate_orders(equity):
used_bp = self.on_order(order, lvl, bp)
bp -= used_bp
            # advance the progress bar for this processed tick
self._pbar.update(1)
# if ticks.timestamp >= self.start_dt:
# self.basic_publish('next', sender=self.id)
if self.t >= self.warmup:
self._save_positions()
def on_order(self, order, lvl, bp):
"""Handling new order
- Orders are generated from signals
        - will have to check the currently available buying power before publishing
Parameter:
---------
order (Order Event)
lvl (str): Level of urgency for the order
This flag will be used to call corresponding callback
        bp (float): The amount of available buying power
Return:
-------
used buying power (float)
"""
S = order.symbol
need_bp = order.quantity * self.ticks[S].close
if need_bp <= bp: # have enough buying power to place order
used_bp = need_bp
if lvl == 'hard_stop':
self.on_hard_stop(S)
elif lvl == 'rebalance':
self.on_rebalance(S)
self.pos[order.symbol].confirm_order(order)
logger.info(
'Publish Order={} for Strategy={}'
.format(order, self.id)
)
self.basic_publish('order', sender=self.id, order=order)
else:
used_bp = 0
return used_bp
def generate_signal(self, symbol, signal_type, **kws):
"""Generate a signal that will stored at Strategy level
- Then all signals will be batch processed
Parameter
---------
symbol: str, the target symbol for the signal
signal_type: {LONG, SHORT, EXIT}
        kws: additional arguments passed to the SignalEvent class
- especially the `strength` for percentage of portfolio
- if not passed, the default `pct_portfolio` will be used
"""
self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)
def _calculate_signals(self):
# update existing position information
for pos in self.pos.values():
pos._calculate_signals()
self.calculate_signals()
def _update_data(self, ticks):
"""Update the existing state of strategies
- based on given market observation
Note:
-----
1. It will always be called before calculating the new signal
        2. this will be called whether or not the strategy is in its warmup period,
            because the warmup period is used for gathering the necessary data
"""
self.ticks = ticks
self.t += 1
for S, pos in self.pos.items():
pos._update_data(ticks[S])
self.update_data(ticks)
def _save_positions(self):
output = {
'timestamp': self.ticks.timestamp, 't': self.t,
'cash': self.cash, 'commission': self.commission,
'nav': self.nav,
}
for k, v in self.pos.items():
output[str(k)+'_quantity'] = v.quantity
output[str(k)+'_mv'] = v.mv
self._hist.append(output)
|
[
"import logging, numpy as np, time, pandas as pd\n\nfrom abc import abstractmethod\nfrom kombu import binding\nfrom tqdm import tqdm\nfrom functools import lru_cache\nfrom threading import Thread\nfrom math import ceil\nfrom copy import copy\n\nfrom .pos import Position\nfrom .base import BaseConsumer\nfrom .event import SignalEventPct, OrderEvent\nfrom .conf import LONG, SHORT, EXIT, MKT, BUY, SELL, LOCAL_TZ\nfrom .util import clean_timestamp\nfrom .errors import OverFilling\n\nlogger = logging.getLogger('Strategy')\n\n\n\nclass BaseStrategy(BaseConsumer):\n\t\"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\tdef __init__(\n\t\tself, symbol_list, allocation, freq, positions,\n\t\tstart, end, warmup=0, fixed_allocation=True,\n\t\tbatch_size=10000\n\t):\n\t\t\"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n\t\tn = ceil(freq.one_day)\n\t\tnum_pos = len(positions)\n\n\t\t# getting neccesary parameters\n\t\tself.symbol_list = symbol_list\n\t\tself.freq = freq\n\t\tself.warmup = warmup * n\n\n\t\tif start:\n\t\t\tself.start_dt = clean_timestamp(start)\n\n\t\tif end:\n\t\t\tself.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1, days=1)\n\n\n\t\t# allocation parameters for tracking portfolio\n\t\tself.allocation = allocation\n\t\tself.cash = allocation\n\t\tself.commission = 0\n\t\tself.fixed_allocation = fixed_allocation\n\n\t\tpos_dict = {}\n\t\tfor perm_tick, v in positions.items():\n\t\t\t# want to have position, must know its market ticks for decision\n\t\t\tif perm_tick not in self.symbol_list:\n\t\t\t\tself.symbol_list.append(perm_tick)\n\n\t\t\tpos = Position(\n\t\t\t\tperm_tick,\n\t\t\t\tpct_portfolio=v.get('pct_portfolio', 1/num_pos),\n\t\t\t\trebalance=v.get('rebalance', 0) * n,\n\t\t\t\thard_stop=v.get('hard_stop', 0),\n\t\t\t)\n\t\t\tpos_dict[perm_tick] = pos\n\t\tself.pos = pos_dict\n\n\t\t# starting is always 0, it will increment itself every market tick\n\t\tself.t = 0\n\t\tself._hist = []\n\t\tself.batch_size = batch_size\n\n\t\tsuper().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n\n\t@abstractmethod\n\tdef calculate_signals(self):\n\t\t\"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n\t\traise NotImplementedError(\n\t\t\t\"Should implement calculate_signals()\\n\" + \\\n\t\t\t\"By calling this method to calculate 'Signal' Events\"\n\t\t)\n\n\tdef 
subscriptions(self):\n\t\treturn [\n\t\t\t('ack-reg-feed', self.id, self.on_ack_reg_feed),\n\t\t\t('ack-dereg_feed', self.id, self.on_ack_dereg_feed),\n\t\t\t('ack-reg-exe', self.id, self.on_ack_reg_exe),\n\t\t\t('ack-dereg-exe', self.id, self.on_ack_dereg_exe),\n\t\t\t('eod', self.id, self.on_eod),\n\t\t\t('tick', self.id, self.on_market),\n\t\t\t('fill', self.id, self.on_fill),\n\t\t]\n\t\t\n\tdef update_data(self, ticks):\n\t\tpass\n\n\tdef on_hard_stop(self, symbol):\n\t\tpass\n\n\tdef on_rebalance(self, symbol):\n\t\tpass\n\n\tdef has_position(self, symbol):\n\t\treturn self.pos[symbol].has_position\n\n\tdef has_open_orders(self, symbol):\n\t\treturn self.pos[symbol].has_open_orders\n\n\tdef has_long(self, symbol):\n\t\treturn self.pos[symbol].has_long\n\n\tdef has_short(self, symbol):\n\t\treturn self.pos[symbol].has_short\n\n\t@property\n\tdef nav(self):\n\t\t\"\"\"Net Account Value / Net Liquidating Value\"\"\"\n\t\treturn sum(pos.mv for pos in self.pos.values()) + self.cash\n\n\t@property\n\tdef total_cost(self):\n\t\treturn sum(pos.cost for pos in self.pos.values())\n\n\t@property\n\tdef total_bp(self):\n\t\tif self.fixed_allocation:\n\t\t\treturn self.allocation\n\t\telse:\n\t\t\treturn self.nav\n\n\t@property\n\tdef avaliable_bp(self):\n\t\treturn self.total_bp - self.total_cost\n\n\tdef start(self):\n\t\twhile self.status != 'RUNNING':\t\n\t\t\ttime.sleep(2)\n\n\t\t# setting up progress bar\n\t\tself._pbar = tqdm(\n\t\t\ttotal=int(np.ceil(\n\t\t\t\tpd.bdate_range(self.start_dt, self.end_dt).size\n\t\t\t\t* np.ceil(self.freq.one_day)\n\t\t\t)),\n\t\t\tminiters=int(np.ceil(self.freq.one_day)),\n\t\t\tunit=' tick<{}>'.format(self.freq.value),\n\t\t)\n\n\t\t# publish event to get started\n\t\tlogger.info('Warming up Strategy')\n\t\tself.basic_publish('warmup', sender=self.id)\n\t\tlogger.info('Really Starting up calculating Signals')\n\t\tself.basic_publish('next', sender=self.id)\n\n\n\tdef on_ack_reg_feed(self, oid, body):\n\t\tself.required['feed'] = True\n\n\tdef on_ack_reg_exe(self, oid, body):\n\t\tself.required['exe'] = True\n\n\tdef on_ack_dereg_feed(self, oid, body):\n\t\tself.required['feed'] = False\n\n\tdef on_ack_dereg_exe(self, oid, body):\n\t\tself.required['exe'] = False\n\n\n\tdef on_eod(self, oid, body):\n\t\t\"\"\"Handlering End of Data Event\"\"\"\n\t\tself._pbar.update(self._pbar.total - self._pbar.n)\n\t\tself._pbar.close()\n\n\t\tself.basic_publish('dereg-feed', sender=self.id)\n\t\tself.basic_publish('dereg-exe', sender=self.id)\n\n\t\tself._stop()\n\n\n\tdef on_fill(self, oid, body):\n\t\t\"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n\t\tlogger.info('Consuming filled Order')\n\t\tfill = body['fill']\n\n\t\t# update the position first\n\t\tself.pos[fill.symbol].on_fill(fill)\n\n\t\t# getting data from the fill event\n\t\tQ = fill.quantity\n\t\tK, D, C = fill.fill_cost, fill.fill_type, fill.commission\n\n\t\tcost = D.value * K * Q\n\n\t\tself.commission += C\n\t\tself.cash -= cost + C\n\n\n\tdef on_market(self, oid, body):\n\t\t\"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will 
collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n\t\tif body['freq'] != self.freq: return\n\n\t\tticks = body['ticks']\n\t\tself._update_data(ticks)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._calculate_signals()\n\n\t\t\t# publish generated signals\n\t\t\tequity = self.total_bp\n\t\t\tbp = copy(self.avaliable_bp) # current snap_shot of buying power\n\t\t\tfor S, pos in self.pos.items():\n\t\t\t\tfor order, lvl in pos.generate_orders(equity):\n\t\t\t\t\tused_bp = self.on_order(order, lvl, bp)\n\t\t\t\t\tbp -= used_bp\n\t\t\t\t\n\t\t\t# save old strategy performance history\n\t\t\tself._pbar.update(1)\n\t\t\n\t\t# if ticks.timestamp >= self.start_dt:\n\t\t\t# self.basic_publish('next', sender=self.id)\n\n\t\tif self.t >= self.warmup:\n\t\t\tself._save_positions()\n\n\n\tdef on_order(self, order, lvl, bp):\n\t\t\"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n\t\tS = order.symbol\n\n\t\tneed_bp = order.quantity * self.ticks[S].close\n\t\tif need_bp <= bp: # have enough buying power to place order\n\t\t\tused_bp = need_bp\n\n\t\t\tif lvl == 'hard_stop':\n\t\t\t\tself.on_hard_stop(S)\n\t\t\telif lvl == 'rebalance':\n\t\t\t\tself.on_rebalance(S)\n\n\t\t\tself.pos[order.symbol].confirm_order(order)\n\t\t\tlogger.info(\n\t\t\t\t'Publish Order={} for Strategy={}'\n\t\t\t\t.format(order, self.id)\n\t\t\t)\n\t\t\tself.basic_publish('order', sender=self.id, order=order)\n\t\telse:\n\t\t\tused_bp = 0\n\t\treturn used_bp\n\n\n\tdef generate_signal(self, symbol, signal_type, **kws):\n\t\t\"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n\t\tself.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n\n\tdef _calculate_signals(self):\n\t\t# update existing position information\n\t\tfor pos in self.pos.values():\n\t\t\tpos._calculate_signals()\n\n\t\tself.calculate_signals()\n\n\n\tdef _update_data(self, ticks):\n\t\t\"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n\t\tself.ticks = ticks\n\t\tself.t += 1\n\n\t\tfor S, pos in self.pos.items():\n\t\t\tpos._update_data(ticks[S])\n\n\t\tself.update_data(ticks)\n\n\n\tdef _save_positions(self):\n\t\toutput = {\n\t\t\t'timestamp': self.ticks.timestamp, 't': self.t,\n\t\t\t'cash': self.cash, 'commission': self.commission,\n\t\t\t'nav': self.nav,\n\t\t}\n\t\tfor k, v in self.pos.items():\n\t\t\toutput[str(k)+'_quantity'] = v.quantity\n\t\t\toutput[str(k)+'_mv'] = v.mv\n\n\t\tself._hist.append(output)\n\t\t",
"import logging, numpy as np, time, pandas as pd\nfrom abc import abstractmethod\nfrom kombu import binding\nfrom tqdm import tqdm\nfrom functools import lru_cache\nfrom threading import Thread\nfrom math import ceil\nfrom copy import copy\nfrom .pos import Position\nfrom .base import BaseConsumer\nfrom .event import SignalEventPct, OrderEvent\nfrom .conf import LONG, SHORT, EXIT, MKT, BUY, SELL, LOCAL_TZ\nfrom .util import clean_timestamp\nfrom .errors import OverFilling\nlogger = logging.getLogger('Strategy')\n\n\nclass BaseStrategy(BaseConsumer):\n \"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def 
on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag 
will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
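The `on_fill` handler in this entry charges cash with `cost = D.value * K * Q`, where `D` is the fill direction, `K` the fill cost per unit, and `Q` the quantity, then subtracts the commission on top. A short worked example follows, assuming the BUY/SELL constants carry direction values of +1 and -1 (an assumption; the `conf` module itself is not part of this entry), so a sell produces a negative cost and flows proceeds back into cash.

    # Worked example of the on_fill cash update, under the assumption that
    # BUY/SELL map to direction values +1 and -1 (not shown in this entry).
    from enum import Enum

    class Direction(Enum):
        BUY = 1
        SELL = -1

    cash, commission = 100_000.0, 0.0

    # Buy 10 units filled at 250.0 with a 1.0 commission.
    Q, K, D, C = 10, 250.0, Direction.BUY, 1.0
    cost = D.value * K * Q          # +2500.0
    commission += C
    cash -= cost + C                # 97_499.0 remaining

    # Later sell the same 10 units at 260.0.
    Q, K, D, C = 10, 260.0, Direction.SELL, 1.0
    cost = D.value * K * Q          # -2600.0, i.e. proceeds flow back
    commission += C
    cash -= cost + C                # 100_098.0
    print(cash, commission)         # 100098.0 2.0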
"<import token>\nlogger = logging.getLogger('Strategy')\n\n\nclass BaseStrategy(BaseConsumer):\n \"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + 
self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for 
Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
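Within each market heartbeat, `on_market` snapshots the available buying power once and then walks the generated orders in sequence; `on_order` charges each accepted order's notional (`quantity * close`) against that running total and silently drops any order that would exceed the remainder (returning a used buying power of 0). The standalone sketch below reproduces only that gating loop, with hypothetical `(symbol, quantity, price)` tuples standing in for the real OrderEvent objects and tick prices.

    # Standalone sketch of the buying-power gate from on_market/on_order.
    # Orders are hypothetical (symbol, quantity, price) tuples here.
    def gate_orders(orders, available_bp):
        accepted, bp = [], available_bp
        for symbol, quantity, price in orders:
            need_bp = quantity * price
            if need_bp <= bp:          # enough buying power left this tick
                accepted.append((symbol, quantity))
                bp -= need_bp          # charge it against the running total
            # else: order is skipped and bp is unchanged (used_bp == 0)
        return accepted, bp

    orders = [('AAPL', 10, 150.0), ('MSFT', 5, 300.0), ('TSLA', 20, 200.0)]
    print(gate_orders(orders, available_bp=3200.0))
    # ([('AAPL', 10), ('MSFT', 5)], 200.0) -- the 4000.0 TSLA order is dropped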
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n \"\"\"Strategy is an abstract base class providing an interface for\n\tall subsequent (inherited) strategy handling objects.\n\n\tGoal\n\t----\n\tThe goal of a (derived) Strategy object \n\t- based on the inbound 'Tick', calcualte signals\n\t- 'Signal' is at the symbol level which will be published\n\n\tNote\n\t----\n\tThis is designed to work both with historic and live data as\n\tthe Strategy object is agnostic to the data source,\n\tsince it obtains the 'Tick' object from MarketEvent message\n\t\"\"\"\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n 
def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n 
self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
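After warmup, `_save_positions` appends one flat dict per tick (timestamp, tick counter, cash, commission, NAV, plus per-symbol quantity and market-value columns) to `self._hist`. Those records load directly into a pandas DataFrame, which is the natural way to inspect the equity curve once a run finishes; the helper below is a hypothetical post-run utility, not part of the source, and assumes the stored timestamps are usable as an index.

    # Hypothetical post-run helper: _save_positions stores plain dicts,
    # so the history converts straight into a DataFrame.
    import pandas as pd

    def history_frame(strategy):
        df = pd.DataFrame(strategy._hist).set_index('timestamp')
        df['returns'] = df['nav'].pct_change().fillna(0.0)
        return df

    # e.g. history_frame(strat)[['nav', 'cash', 'returns']] after a backtest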
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n\n def start(self):\n while self.status != 'RUNNING':\n time.sleep(2)\n self._pbar = tqdm(total=int(np.ceil(pd.bdate_range(self.start_dt,\n self.end_dt).size * np.ceil(self.freq.one_day))), miniters=int(\n 
np.ceil(self.freq.one_day)), unit=' tick<{}>'.format(self.freq.\n value))\n logger.info('Warming up Strategy')\n self.basic_publish('warmup', sender=self.id)\n logger.info('Really Starting up calculating Signals')\n self.basic_publish('next', sender=self.id)\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the 
`strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
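The constructor converts day-denominated settings into tick counts by multiplying with `n = ceil(freq.one_day)`, so both `warmup` and each position's `rebalance` are interpreted in days regardless of bar frequency. The small illustration below assumes `one_day` values of 1.0 for daily bars and roughly 390 for US-session minute bars; the actual FREQ enum is not shown in this entry, so those figures are assumptions.

    # Day-to-tick conversion used in __init__, assuming one_day is 1.0 for
    # daily bars and ~390 for minute bars (assumed values, not from the source).
    from math import ceil

    warmup_days, rebalance_days = 20, 5
    for one_day, label in [(1.0, 'daily'), (390.0, 'minute')]:
        n = ceil(one_day)
        print(label, 'warmup ticks:', warmup_days * n,
              'rebalance ticks:', rebalance_days * n)
    # daily  -> warmup 20 ticks,   rebalance 5 ticks
    # minute -> warmup 7800 ticks, rebalance 1950 ticks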
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n 
self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n\n def on_order(self, order, lvl, bp):\n \"\"\"Handling new order\n\t\t- Orders are generated from signals\n\t\t- will have to check currently avaliable buying power before publish\n\n\t\tParameter:\n\t\t---------\n\t\torder (Order Event)\n\t\tlvl (str): Level of urgency for the order\n\t\t\tThis flag will be used to call corresponding callback\n\t\tbp (float): The amount of avaliable buying power\n\n\t\tReturn:\n\t\t-------\n\t\tused buying power (float)\n\t\t\"\"\"\n S = order.symbol\n need_bp = order.quantity * self.ticks[S].close\n if need_bp <= bp:\n used_bp = need_bp\n if lvl == 'hard_stop':\n self.on_hard_stop(S)\n elif lvl == 'rebalance':\n self.on_rebalance(S)\n self.pos[order.symbol].confirm_order(order)\n logger.info('Publish Order={} for Strategy={}'.format(order,\n self.id))\n self.basic_publish('order', sender=self.id, order=order)\n else:\n used_bp = 0\n return used_bp\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market 
observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
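`subscriptions` returns `(routing_key, queue_id, callback)` triples, and the surrounding `basic_publish` calls suggest that `BaseConsumer` binds these to a kombu message bus and routes each inbound message to the matching handler. Since `BaseConsumer` is not part of this entry, the snippet below is only a minimal in-memory stand-in for that key-to-callback routing, useful for reasoning about the event flow rather than a description of the real wiring.

    # Minimal in-memory stand-in for routing (routing_key, id, callback)
    # triples; the real binding is done with kombu inside BaseConsumer,
    # which is not shown in this entry.
    class Dispatcher:
        def __init__(self, triples):
            self._routes = {key: cb for key, _qid, cb in triples}

        def publish(self, key, oid=None, **body):
            handler = self._routes.get(key)
            if handler is not None:
                handler(oid, body)

    d = Dispatcher([
        ('eod', 'stgy-1', lambda oid, body: print('eod', body)),
        ('tick', 'stgy-1', lambda oid, body: print('tick', body['freq'])),
    ])
    d.publish('tick', freq='1min')   # -> tick 1min
    d.publish('eod')                 # -> eod {}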
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n\n def has_open_orders(self, symbol):\n return self.pos[symbol].has_open_orders\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n 
self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
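Each entry in the `positions` dict may omit `pct_portfolio`, `rebalance`, or `hard_stop`; the constructor then falls back to an equal weight of `1/num_pos` and disables rebalancing and the hard stop. The snippet below replays just that default resolution with a hypothetical positions dict; the `Position` class itself lives in `.pos` and is not shown here.

    # Sketch of how per-symbol position settings are resolved in __init__,
    # using a hypothetical positions dict; Position itself is not shown.
    positions = {'AAPL': {'pct_portfolio': 0.5, 'hard_stop': 0.2},
                 'MSFT': {},            # equal weight, no rebalance or stop
                 'TSLA': {'rebalance': 10}}
    num_pos, n = len(positions), 1      # n = ceil(freq.one_day) for daily bars

    for perm_tick, v in positions.items():
        print(perm_tick,
              v.get('pct_portfolio', 1 / num_pos),
              v.get('rebalance', 0) * n,
              v.get('hard_stop', 0))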
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n\n @property\n def nav(self):\n \"\"\"Net Account Value / Net Liquidating Value\"\"\"\n return sum(pos.mv for pos in self.pos.values()) + self.cash\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n 
self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n\n def on_ack_reg_exe(self, oid, body):\n self.required['exe'] = True\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n 
self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n 
self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n\n def on_market(self, oid, body):\n \"\"\"On market event\n\t\t- update information for each existing poistion\n\t\t- generate orders for rebalancing()\n\t\t- the strategy will calculate signal(s)\n\t\t- and publish them to the exchange for processing\n\t\t- then a \"done\" will be published to indicate\n\t\t\tthe strategy is finish doing everything this heartbeat\n\t\t- so then the risk manager will collect all signals\n\t\t\tbefore sending order for execution\n\n\t\tParameter:\n\t\t----------\n\t\tticks (Market Event)\n\t\t\"\"\"\n if body['freq'] != self.freq:\n return\n ticks = body['ticks']\n self._update_data(ticks)\n if self.t >= self.warmup:\n self._calculate_signals()\n equity = self.total_bp\n bp = copy(self.avaliable_bp)\n for S, pos in self.pos.items():\n for order, lvl in pos.generate_orders(equity):\n used_bp = self.on_order(order, lvl, bp)\n bp -= used_bp\n self._pbar.update(1)\n if self.t >= self.warmup:\n self._save_positions()\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n\n def __init__(self, symbol_list, allocation, freq, positions, start, end,\n warmup=0, fixed_allocation=True, batch_size=10000):\n \"\"\"\n\t\tParameter:\n\t\t----------\n\t\tsymbol_list (list): A list of Contract perm_tick (for data)\n\t\tallocation (float): Dollar amount that this strategy is able to use\n\t\tfreq (conf.FREQ): Data Frequency type for this strategy (for data)\n\t\tpositions (dict of dict):\n\t\t\tA dictionary with perm_tick and a dictionary of arguments\n\n\t\t\t- pct_portfolio (float): percentage of the allocation\n\t\t\t- rebalance (int): # of days to rebalance to pct_portfolio\n\t\t\t- hard_stop (float): hard drawdown gate to close position\n\t\twarmup (int): # of days to warmup the strategy\n\t\tenv_type (string): {'BACKTEST', 'PAPPER', 'LIVE'}\n\t\t\twhich environment to run the startegy\n\t\tstart, end (datetime):\n\t\t\tOnly for backtesting to specificy the range of data to test\n\t\t\"\"\"\n n = ceil(freq.one_day)\n num_pos = len(positions)\n self.symbol_list = symbol_list\n self.freq = freq\n self.warmup = warmup * n\n if start:\n self.start_dt = clean_timestamp(start)\n if end:\n self.end_dt = clean_timestamp(end) + pd.DateOffset(seconds=-1,\n days=1)\n self.allocation = allocation\n self.cash = allocation\n self.commission = 0\n self.fixed_allocation = fixed_allocation\n pos_dict = {}\n for perm_tick, v in positions.items():\n if perm_tick not in self.symbol_list:\n self.symbol_list.append(perm_tick)\n pos = Position(perm_tick, pct_portfolio=v.get('pct_portfolio', \n 1 / num_pos), rebalance=v.get('rebalance', 0) * n,\n hard_stop=v.get('hard_stop', 0))\n pos_dict[perm_tick] = pos\n self.pos = pos_dict\n self.t = 0\n self._hist = []\n self.batch_size = batch_size\n super().__init__(comp_type='STGY', required=['feed', 'exe'])\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n 
self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n\n def generate_signal(self, symbol, signal_type, **kws):\n \"\"\"Generate a signal that will stored at Strategy level\n\t\t- Then all signals will be batch processed\n\n\t\tParameter\n\t\t---------\n\t\tsymbol: str, the target symbol for the signal\n\t\tsignal_type: {LONG, SHORT, EXIT}\n\t\tkws: additional arguments passes to the SignalEvent class\n\t\t\t- especially the `strength` for percentage of portfolio\n\t\t\t- if not passed, the default `pct_portfolio` will be used\n\t\t\"\"\"\n self.pos[symbol]._generate_signal(signal_type, lvl='normal', **kws)\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. 
this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n\n def on_rebalance(self, symbol):\n pass\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n\n def has_short(self, symbol):\n return self.pos[symbol].has_short\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n\n def has_long(self, symbol):\n return self.pos[symbol].has_long\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n\n def _save_positions(self):\n output = {'timestamp': self.ticks.timestamp, 't': self.t, 'cash':\n self.cash, 'commission': self.commission, 'nav': self.nav}\n for k, v in self.pos.items():\n output[str(k) + '_quantity'] = v.quantity\n output[str(k) + '_mv'] = v.mv\n self._hist.append(output)\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n\n def _update_data(self, ticks):\n \"\"\"Update the existing state of strategies\n\t\t- based on given market observation\n\n\t\tNote:\n\t\t-----\n\t\t1. It will always be called before calculating the new signal\n\t\t2. this will be called no matter strategy is in warmup period or not\n\t\t\tbecuase warmup period is used for gathering nessceary data\n\t\t\"\"\"\n self.ticks = ticks\n self.t += 1\n for S, pos in self.pos.items():\n pos._update_data(ticks[S])\n self.update_data(ticks)\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n\n def on_hard_stop(self, symbol):\n pass\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n\n @property\n def avaliable_bp(self):\n return self.total_bp - self.total_cost\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n\n def on_fill(self, oid, body):\n \"\"\"Upon filled order\n\t\t- update strategy's position, spot position reversion\n\t\t- update holding time\n\t\t- update position quantity\n\n\t\tParameter:\n\t\t----------\n\t\tfill (Fill Event)\n\t\t\"\"\"\n logger.info('Consuming filled Order')\n fill = body['fill']\n self.pos[fill.symbol].on_fill(fill)\n Q = fill.quantity\n K, D, C = fill.fill_cost, fill.fill_type, fill.commission\n cost = D.value * K * Q\n self.commission += C\n self.cash -= cost + C\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_cost(self):\n return sum(pos.cost for pos in self.pos.values())\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n\n def on_ack_reg_feed(self, oid, body):\n self.required['feed'] = True\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n\n @abstractmethod\n def calculate_signals(self):\n \"\"\"Provide the mechanism to calculate a list of signals\"\"\"\n raise NotImplementedError('Should implement calculate_signals()\\n' +\n \"By calling this method to calculate 'Signal' Events\")\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n\n def has_position(self, symbol):\n return self.pos[symbol].has_position\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_feed(self, oid, body):\n self.required['feed'] = False\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _calculate_signals(self):\n for pos in self.pos.values():\n pos._calculate_signals()\n self.calculate_signals()\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n\n def on_eod(self, oid, body):\n \"\"\"Handlering End of Data Event\"\"\"\n self._pbar.update(self._pbar.total - self._pbar.n)\n self._pbar.close()\n self.basic_publish('dereg-feed', sender=self.id)\n self.basic_publish('dereg-exe', sender=self.id)\n self._stop()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n\n def subscriptions(self):\n return [('ack-reg-feed', self.id, self.on_ack_reg_feed), (\n 'ack-dereg_feed', self.id, self.on_ack_dereg_feed), (\n 'ack-reg-exe', self.id, self.on_ack_reg_exe), ('ack-dereg-exe',\n self.id, self.on_ack_dereg_exe), ('eod', self.id, self.on_eod),\n ('tick', self.id, self.on_market), ('fill', self.id, self.on_fill)]\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def update_data(self, ticks):\n pass\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def total_bp(self):\n if self.fixed_allocation:\n return self.allocation\n else:\n return self.nav\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def on_ack_dereg_exe(self, oid, body):\n self.required['exe'] = False\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass BaseStrategy(BaseConsumer):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
531 |
3ae0149af78216d6cc85313ebaa6f7cd99185c05
|
def postfix(expression):
operators, stack = '+-*/', []
for item in expression.split():
if item not in operators:
stack.append(item)
else:
operand_1, operand_2 = stack.pop(), stack.pop()
stack.append(str(eval(operand_2 + item + operand_1)))
return int(float(stack.pop()))
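# Illustrative usage (editor-added example, not part of the original snippet):
# operands are pushed and each operator is applied to the top two stack items,
# so postfix("3 4 + 2 *") returns 14, as does postfix("5 1 2 + 4 * + 3 -").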
|
[
"\ndef postfix(expression):\n operators, stack = '+-*/', []\n for item in expression.split():\n if item not in operators:\n stack.append(item)\n else:\n operand_1, operand_2 = stack.pop(), stack.pop()\n stack.append(str(eval(operand_2 + item + operand_1)))\n return int(float(stack.pop()))\n\n",
"def postfix(expression):\n operators, stack = '+-*/', []\n for item in expression.split():\n if item not in operators:\n stack.append(item)\n else:\n operand_1, operand_2 = stack.pop(), stack.pop()\n stack.append(str(eval(operand_2 + item + operand_1)))\n return int(float(stack.pop()))\n",
"<function token>\n"
] | false |
532 |
d95d899c6eae5a90c90d3d920ee40b38bf304805
|
#coding: utf-8
"""
1) Find the proper nouns in a text and return them in a list. Use regex ('import re') and the findall() function. In the basic version, return all words that start with an uppercase letter.
2) Show a plot of a few seconds of the accelerometer data from the dataset:
https://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#
Use the read_csv() function to open the files
"""
if __name__ == "__main__":
pass
|
[
"#coding: utf-8\n\"\"\" \n1) Encontre em um texto os nomes próprios e os retorne em uma lista. Utilize o Regex (‘import re’) e a função findall(). Na versão básica, retorne todas as palavras que iniciam com maiúscula.\n\n\n2) Apresente um plot de alguns segundos dos dados de acelerômetro do dataset:\nhttps://archive.ics.uci.edu/ml/datasets/Activity+Recognition+from+Single+Chest-Mounted+Accelerometer#\nUse a função read_csv() para abrir os arquivos\n\n\"\"\"\n\nif __name__ == \"__main__\":\n\tpass",
"<docstring token>\nif __name__ == '__main__':\n pass\n",
"<docstring token>\n<code token>\n"
] | false |
533 |
dd7ade05ef912f7c094883507768cc21f95f31f6
|
"""
A module for constants.
"""
# finish adding notes for keys and uncomment
KEYS = [
"CM",
"GM"
# ,
# "DM",
# "AM",
# "EM",
# "BM",
# "FSM",
# "CSM",
# "Am",
# "Em",
# "Bm",
# "FSm",
# "CSm",
# "GSm",
# "DSm",
# "ASm",
]
NOTES_FOR_KEY = {
"CM": [
21,
23,
24,
26,
28,
29,
31,
33,
35,
36,
38,
40,
41,
43,
45,
47,
48,
50,
52,
53,
55,
57,
59,
60,
62,
64,
65,
67,
69,
71,
72,
74,
76,
77,
79,
81,
83,
84,
86,
88,
89,
91,
93,
95,
96,
98,
100,
101,
103,
105,
107,
108,
],
"GM": [
21,
23,
24,
26,
28,
30,
31,
33,
35,
36,
38,
40,
42,
43,
45,
47,
48,
50,
52,
54,
55,
57,
59,
60,
62,
64,
66,
67,
69,
71,
72,
74,
76,
78,
79,
81,
83,
84,
86,
88,
90,
91,
93,
95,
96,
98,
100,
102,
103,
105,
107,
108,
],
"DM": [],
"AM": [],
"EM": [],
"BM": [],
"FSM": [],
"CSM": [],
"Am": [],
"Em": [],
"Bm": [],
"FSm": [],
"CSm": [],
"GSm": [],
"DSm": [],
"ASm": [],
}
TONIC_NOTE_FOR_KEY = {
"CM": 60,
"GM": 67,
"DM": None,
"AM": None,
"EM": None,
"BM": None,
"FSM": None,
"CSM": None,
"Am": None,
"Em": None,
"Bm": None,
"FSm": None,
"CSm": None,
"GSm": None,
"DSm": None,
"ASm": None,
}
# add more chords later
STEPS_FOR_CHORD = {"major_triad": [0, 4, 7]}
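# For example (illustrative): applying the major_triad steps [0, 4, 7] to the CM
# tonic note 60 gives the MIDI pitches [60, 64, 67], i.e. a C major triad.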
# constants for value function
# add more complex rewards
NOTE_IN_KEY_REWARD = 1
NOTE_IN_CHORDS_REWARD = 1
SUPER_CONSONANT_INTERVAL_REWARD = 3
CONSONANT_INTERVAL_REWARD = 2
SOMEWHAT_CONSONANT_INTERVAL_REWARD = 1
DISSONANT_INTERVAL_REWARD = -2
SOMEWHAT_DISSONANT_INTERVAL_REWARD = -1
CENTRICITY_FACTOR = 1 # reward is number of times note occured before * CENTRICITY_FACTOR
|
[
"\"\"\"\nA module for constants.\n\n\"\"\"\n\n# fin adding notes for keys and uncomment \nKEYS = [\n \"CM\",\n \"GM\"\n # ,\n # \"DM\",\n # \"AM\",\n # \"EM\",\n # \"BM\",\n # \"FSM\",\n # \"CSM\",\n # \"Am\",\n # \"Em\",\n # \"Bm\",\n # \"FSm\",\n # \"CSm\",\n # \"GSm\",\n # \"DSm\",\n # \"ASm\",\n]\n\nNOTES_FOR_KEY = {\n \"CM\": [\n 21,\n 23,\n 24,\n 26,\n 28,\n 29,\n 31,\n 33,\n 35,\n 36,\n 38,\n 40,\n 41,\n 43,\n 45,\n 47,\n 48,\n 50,\n 52,\n 53,\n 55,\n 57,\n 59,\n 60,\n 62,\n 64,\n 65,\n 67,\n 69,\n 71,\n 72,\n 74,\n 76,\n 77,\n 79,\n 81,\n 83,\n 84,\n 86,\n 88,\n 89,\n 91,\n 93,\n 95,\n 96,\n 98,\n 100,\n 101,\n 103,\n 105,\n 107,\n 108,\n ],\n \"GM\": [\n 21,\n 23,\n 24,\n 26,\n 28,\n 30,\n 31,\n 33,\n 35,\n 36,\n 38,\n 40,\n 42,\n 43,\n 45,\n 47,\n 48,\n 50,\n 52,\n 54,\n 55,\n 57,\n 59,\n 60,\n 62,\n 64,\n 66,\n 67,\n 69,\n 71,\n 72,\n 74,\n 76,\n 78,\n 79,\n 81,\n 83,\n 84,\n 86,\n 88,\n 90,\n 91,\n 93,\n 95,\n 96,\n 98,\n 100,\n 102,\n 103,\n 105,\n 107,\n 108,\n ],\n \"DM\": [],\n \"AM\": [],\n \"EM\": [],\n \"BM\": [],\n \"FSM\": [],\n \"CSM\": [],\n \"Am\": [],\n \"Em\": [],\n \"Bm\": [],\n \"FSm\": [],\n \"CSm\": [],\n \"GSm\": [],\n \"DSm\": [],\n \"ASm\": [],\n}\n\nTONIC_NOTE_FOR_KEY = {\n \"CM\": 60,\n \"GM\": 67,\n \"DM\": None,\n \"AM\": None,\n \"EM\": None,\n \"BM\": None,\n \"FSM\": None,\n \"CSM\": None,\n \"Am\": None,\n \"Em\": None,\n \"Bm\": None,\n \"FSm\": None,\n \"CSm\": None,\n \"GSm\": None,\n \"DSm\": None,\n \"ASm\": None,\n}\n\n# add more chords later\nSTEPS_FOR_CHORD = {\"major_triad\": [0, 4, 7]}\n\n\n\n# constants for value function\n# add more complex rewards\nNOTE_IN_KEY_REWARD = 1\nNOTE_IN_CHORDS_REWARD = 1\nSUPER_CONSONANT_INTERVAL_REWARD = 3\nCONSONANT_INTERVAL_REWARD = 2\nSOMEWHAT_CONSONANT_INTERVAL_REWARD = 1\nDISSONANT_INTERVAL_REWARD = -2\nSOMEWHAT_DISSONANT_INTERVAL_REWARD = -1\nCENTRICITY_FACTOR = 1 # reward is number of times note occured before * CENTRICITY_FACTOR",
"<docstring token>\nKEYS = ['CM', 'GM']\nNOTES_FOR_KEY = {'CM': [21, 23, 24, 26, 28, 29, 31, 33, 35, 36, 38, 40, 41,\n 43, 45, 47, 48, 50, 52, 53, 55, 57, 59, 60, 62, 64, 65, 67, 69, 71, 72,\n 74, 76, 77, 79, 81, 83, 84, 86, 88, 89, 91, 93, 95, 96, 98, 100, 101, \n 103, 105, 107, 108], 'GM': [21, 23, 24, 26, 28, 30, 31, 33, 35, 36, 38,\n 40, 42, 43, 45, 47, 48, 50, 52, 54, 55, 57, 59, 60, 62, 64, 66, 67, 69,\n 71, 72, 74, 76, 78, 79, 81, 83, 84, 86, 88, 90, 91, 93, 95, 96, 98, 100,\n 102, 103, 105, 107, 108], 'DM': [], 'AM': [], 'EM': [], 'BM': [], 'FSM':\n [], 'CSM': [], 'Am': [], 'Em': [], 'Bm': [], 'FSm': [], 'CSm': [],\n 'GSm': [], 'DSm': [], 'ASm': []}\nTONIC_NOTE_FOR_KEY = {'CM': 60, 'GM': 67, 'DM': None, 'AM': None, 'EM':\n None, 'BM': None, 'FSM': None, 'CSM': None, 'Am': None, 'Em': None,\n 'Bm': None, 'FSm': None, 'CSm': None, 'GSm': None, 'DSm': None, 'ASm': None\n }\nSTEPS_FOR_CHORD = {'major_triad': [0, 4, 7]}\nNOTE_IN_KEY_REWARD = 1\nNOTE_IN_CHORDS_REWARD = 1\nSUPER_CONSONANT_INTERVAL_REWARD = 3\nCONSONANT_INTERVAL_REWARD = 2\nSOMEWHAT_CONSONANT_INTERVAL_REWARD = 1\nDISSONANT_INTERVAL_REWARD = -2\nSOMEWHAT_DISSONANT_INTERVAL_REWARD = -1\nCENTRICITY_FACTOR = 1\n",
"<docstring token>\n<assignment token>\n"
] | false |
534 |
0c68bd65cac3c8b9fd080900a00991b2d19260ee
|
from django.test import TestCase
from core.factories import CompanyFactory, EmployeeFactory
from core.pair_matcher import MaximumWeightGraphMatcher
class PairMatcherTestCase(TestCase):
def setUp(self):
self.company = CompanyFactory.create()
def test_simple(self):
employees = EmployeeFactory.create_batch(41, company=self.company)
matcher = MaximumWeightGraphMatcher()
groups = matcher.match(self.company, employees)
print('\n'.join(
[','.join(
e.user.username for e in group
) for group in groups]
))
|
[
"from django.test import TestCase\n\nfrom core.factories import CompanyFactory, EmployeeFactory\nfrom core.pair_matcher import MaximumWeightGraphMatcher\n\n\nclass PairMatcherTestCase(TestCase):\n def setUp(self):\n self.company = CompanyFactory.create()\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join(\n [','.join(\n e.user.username for e in group\n ) for group in groups]\n ))\n",
"from django.test import TestCase\nfrom core.factories import CompanyFactory, EmployeeFactory\nfrom core.pair_matcher import MaximumWeightGraphMatcher\n\n\nclass PairMatcherTestCase(TestCase):\n\n def setUp(self):\n self.company = CompanyFactory.create()\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join([','.join(e.user.username for e in group) for group in\n groups]))\n",
"<import token>\n\n\nclass PairMatcherTestCase(TestCase):\n\n def setUp(self):\n self.company = CompanyFactory.create()\n\n def test_simple(self):\n employees = EmployeeFactory.create_batch(41, company=self.company)\n matcher = MaximumWeightGraphMatcher()\n groups = matcher.match(self.company, employees)\n print('\\n'.join([','.join(e.user.username for e in group) for group in\n groups]))\n",
"<import token>\n\n\nclass PairMatcherTestCase(TestCase):\n\n def setUp(self):\n self.company = CompanyFactory.create()\n <function token>\n",
"<import token>\n\n\nclass PairMatcherTestCase(TestCase):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
535 |
32ed07a89a6f929a6c4b78fd79e687b85e01015b
|
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField,SubmitField, PasswordField, RadioField, MultipleFileField, SubmitField, TextAreaField
from wtforms.fields.html5 import EmailField, TelField, DateField
from wtforms.validators import DataRequired, Email, Length, InputRequired
class SignUpForm(FlaskForm):
id = StringField('ID*', validators=[DataRequired()])
fname = StringField('Full Name*', validators=[DataRequired()])
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
password = PasswordField('Password*', validators=[DataRequired()])
contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(min=10, max=10)])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Sign Up >>')
class LoginForm(FlaskForm):
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
password = PasswordField('Password*', validators=[DataRequired()])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Login >>')
class ForgotForm(FlaskForm):
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])
submit = SubmitField('Change your Password')
class changepassword(FlaskForm):
password = PasswordField('Enter Password', validators=[DataRequired()])
submit = SubmitField('Change Password')
class ComplaintForm(FlaskForm):
fname = StringField('Full Name *', validators=[DataRequired()])
email = EmailField('Email Id*',validators=[DataRequired(), Email()])
date = DateField('Date', validators=[DataRequired()])
degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), ('masters', 'Masters')], validators=[DataRequired()])
semester = SelectField(u'Semester*', choices=[('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), ('fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), ('eighth', 'Eighth')], validators=[DataRequired()])
complaintcategory = SelectField(u'Complain Category*', choices=[('infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), ('academics', 'Academics'), ('management', 'Management'), ('faculty', 'Faculty'), ('library', 'Library')], validators=[DataRequired()])
message = TextAreaField('Enter Complain Details', validators=[DataRequired(), Length(max=100)])
#file = MultipleFileField(u'Upload File')
submit = SubmitField('Submit')
class complaint_status(FlaskForm):
status = SelectField(u'Complaint Status', choices=[('Pending', 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')])
submit = SubmitField('Update')
|
[
"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SelectField,SubmitField, PasswordField, RadioField, MultipleFileField, SubmitField, TextAreaField\nfrom wtforms.fields.html5 import EmailField, TelField, DateField\nfrom wtforms.validators import DataRequired, Email, Length, InputRequired\n\nclass SignUpForm(FlaskForm):\n id = StringField('ID*', validators=[DataRequired()])\n fname = StringField('Full Name*', validators=[DataRequired()])\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(min=10, max=10)])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Sign Up >>')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), ('stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*',validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), ('masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), ('second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), ('fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), ('eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[('infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), ('academics', 'Academics'), ('management', 'Management'), ('faculty', 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[DataRequired(), Length(max=100)])\n #file = MultipleFileField(u'Upload File')\n submit = SubmitField('Submit')\n\n\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending', 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')])\n submit = SubmitField('Update')\n",
"from flask_wtf import FlaskForm\nfrom wtforms import StringField, SelectField, SubmitField, PasswordField, RadioField, MultipleFileField, SubmitField, TextAreaField\nfrom wtforms.fields.html5 import EmailField, TelField, DateField\nfrom wtforms.validators import DataRequired, Email, Length, InputRequired\n\n\nclass SignUpForm(FlaskForm):\n id = StringField('ID*', validators=[DataRequired()])\n fname = StringField('Full Name*', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(\n min=10, max=10)])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Sign Up >>')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n\n\nclass SignUpForm(FlaskForm):\n id = StringField('ID*', validators=[DataRequired()])\n fname = StringField('Full Name*', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n contactno = TelField('Mobile No*.', validators=[DataRequired(), Length(\n min=10, max=10)])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Sign Up >>')\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n\n\nclass SignUpForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n\n\nclass LoginForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n password = PasswordField('Password*', validators=[DataRequired()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Login >>')\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n\n\nclass LoginForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n\n\nclass ForgotForm(FlaskForm):\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n design = SelectField(u'Designation*', choices=[('admin', 'Admin'), (\n 'stud', 'Student')], validators=[DataRequired()])\n submit = SubmitField('Change your Password')\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n\n\nclass ForgotForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass changepassword(FlaskForm):\n password = PasswordField('Enter Password', validators=[DataRequired()])\n submit = SubmitField('Change Password')\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass changepassword(FlaskForm):\n <assignment token>\n <assignment token>\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ComplaintForm(FlaskForm):\n fname = StringField('Full Name *', validators=[DataRequired()])\n email = EmailField('Email Id*', validators=[DataRequired(), Email()])\n date = DateField('Date', validators=[DataRequired()])\n degree = SelectField(u'Degree*', choices=[('bachelors', 'Bachelors'), (\n 'masters', 'Masters')], validators=[DataRequired()])\n semester = SelectField(u'Semester*', choices=[('first', 'First'), (\n 'second', 'Second'), ('third', 'Third'), ('fourth', 'Fourth'), (\n 'fifth', 'Fifth'), ('sixth', 'Sixth'), ('seventh', 'Seventh'), (\n 'eighth', 'Eighth')], validators=[DataRequired()])\n complaintcategory = SelectField(u'Complain Category*', choices=[(\n 'infrastructure', 'Infrastructure'), ('accounts', 'Accounts'), (\n 'academics', 'Academics'), ('management', 'Management'), ('faculty',\n 'Faculty'), ('library', 'Library')], validators=[DataRequired()])\n message = TextAreaField('Enter Complain Details', validators=[\n DataRequired(), Length(max=100)])\n submit = SubmitField('Submit')\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ComplaintForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass complaint_status(FlaskForm):\n status = SelectField(u'Complaint Status', choices=[('Pending',\n 'Pending'), ('Under Review', 'Under Review'), ('Resolved', 'Resolved')]\n )\n submit = SubmitField('Update')\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass complaint_status(FlaskForm):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
536 |
257f18db95e069c037341d2af372269e988b0a80
|
# Generated by Django 3.1.2 on 2021-07-02 05:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('asset', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='balance',
name='title',
),
]
|
[
"# Generated by Django 3.1.2 on 2021-07-02 05:38\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('asset', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='balance',\n name='title',\n ),\n ]\n",
"from django.db import migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [('asset', '0001_initial')]\n operations = [migrations.RemoveField(model_name='balance', name='title')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('asset', '0001_initial')]\n operations = [migrations.RemoveField(model_name='balance', name='title')]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
537 |
74cb06ffa41748af431b46c9ff98eb91771a5015
|
#Get roll numbers, name & marks of the students of a class(get from user) and store these details in a file- marks.txt
count = int(input("How many students are there in class? "))
fileObj = open('marks.txt',"w")
for i in range(count):
print("Enter details for student",(i+1),"below:")
rollNo = int(input("Rollno: "))
name = input("Name: ")
marks = float(input("Marks: "))
records = str(rollNo) + "," + name + "," + str(marks) + '\n'
fileObj.write(records)
fileObj.close()
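# Each record written above is one comma-separated line of the form
# "<rollno>,<name>,<marks>", e.g. "1,Asha,87.5" (sample values for illustration only).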
|
[
"#Get roll numbers, name & marks of the students of a class(get from user) and store these details in a file- marks.txt\n\ncount = int(input(\"How many students are there in class? \"))\nfileObj = open('marks.txt',\"w\")\n\nfor i in range(count):\n print(\"Enter details for student\",(i+1),\"below:\")\n rollNo = int(input(\"Rollno: \"))\n name = input(\"Name: \")\n marks = float(input(\"Marks: \"))\n records = str(rollNo) + \",\" + name + \",\" + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()",
"count = int(input('How many students are there in class? '))\nfileObj = open('marks.txt', 'w')\nfor i in range(count):\n print('Enter details for student', i + 1, 'below:')\n rollNo = int(input('Rollno: '))\n name = input('Name: ')\n marks = float(input('Marks: '))\n records = str(rollNo) + ',' + name + ',' + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()\n",
"<assignment token>\nfor i in range(count):\n print('Enter details for student', i + 1, 'below:')\n rollNo = int(input('Rollno: '))\n name = input('Name: ')\n marks = float(input('Marks: '))\n records = str(rollNo) + ',' + name + ',' + str(marks) + '\\n'\n fileObj.write(records)\nfileObj.close()\n",
"<assignment token>\n<code token>\n"
] | false |
538 |
955cf040aaf882328e31e6a943bce04cf721cb11
|
#!/usr/bin/env python3
#
# Copyright (C) 2011-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# =*= License: GPL-2 =*=
import os
import app
import re
from subprocess import call
from subprocess import check_output
import string
import definitions
import urllib2
import json
import utils
import ConfigParser
import StringIO
def get_repo_url(repo):
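    # Expands short repo aliases into full git URLs; for example (illustrative)
    # 'upstream:linux' -> 'git://git.baserock.org/delta/linux' and
    # 'github:foo/bar.git' -> 'git://github.com/foo/bar'.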
url = repo.replace('upstream:', 'git://git.baserock.org/delta/')
url = url.replace('baserock:baserock/',
'git://git.baserock.org/baserock/baserock/')
url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')
url = url.replace('github:', 'git://github.com/')
url = url.replace('gnome:', 'git://git.gnome.org')
if url.endswith('.git'):
url = url[:-4]
return url
def get_repo_name(repo):
''' Convert URIs to strings that only contain digits, letters, _ and %.
NOTE: When changing the code of this function, make sure to also apply
the same to the quote_url() function of lorry. Otherwise the git tarballs
generated by lorry may no longer be found by morph.
'''
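    # For example (illustrative), 'github:foo/bar' expands to
    # 'git://github.com/foo/bar' and is quoted to 'git___github_com_foo_bar'.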
valid_chars = string.digits + string.ascii_letters + '%_'
transl = lambda x: x if x in valid_chars else '_'
return ''.join([transl(x) for x in get_repo_url(repo)])
def get_upstream_version(repo, ref):
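    # Returns a short human-readable version string, e.g. (illustrative)
    # '1a2b3c4d (v1.2.0 + 17 commits)', or '1a2b3c4d (No tag found)' on failure.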
try:
gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))
with app.chdir(gitdir), open(os.devnull, "w") as fnull:
last_tag = check_output(['git', 'describe', '--abbrev=0',
'--tags', ref], stderr=fnull)[0:-1]
commits = check_output(['git', 'rev-list', last_tag + '..' + ref,
'--count'])
result = "%s (%s + %s commits)" % (ref[:8], last_tag, commits[0:-1])
except:
result = ref[:8] + " " + "(No tag found)"
return result
def get_tree(this):
ref = this['ref']
gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))
if not os.path.exists(gitdir):
try:
url = (app.settings['cache-server-url'] + 'repo='
+ get_repo_url(this['repo']) + '&ref=' + ref)
with urllib2.urlopen(url) as response:
tree = json.loads(response.read().decode())['tree']
return tree
except:
app.log(this, 'WARNING: no tree from cache-server', ref)
mirror(this['name'], this['repo'])
with app.chdir(gitdir), open(os.devnull, "w") as fnull:
if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,
stderr=fnull):
# can't resolve this ref. is it upstream?
call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
try:
tree = check_output(['git', 'rev-parse', ref + '^{tree}'],
universal_newlines=True)[0:-1]
return tree
except:
# either we don't have a git dir, or ref is not unique
# or ref does not exist
app.log(this, 'ERROR: could not find tree for ref', ref)
raise SystemExit
def copy_repo(repo, destdir):
'''Copies a cached repository into a directory using cp.
This also fixes up the repository afterwards, so that it can contain
code etc. It does not leave any given branch ready for use.
'''
# core.bare should be false so that git believes work trees are possible
# we do not want the origin remote to behave as a mirror for pulls
# we want a traditional refs/heads -> refs/remotes/origin ref mapping
# set the origin url to the cached repo so that we can quickly clean up
# by packing the refs, we can then edit then en-masse easily
call(['cp', '-a', repo, os.path.join(destdir, '.git')])
call(['git', 'config', 'core.bare', 'false'])
call(['git', 'config', '--unset', 'remote.origin.mirror'])
with open(os.devnull, "w") as fnull:
call(['git', 'config', 'remote.origin.fetch',
'+refs/heads/*:refs/remotes/origin/*'],
stdout=fnull,
stderr=fnull)
call(['git', 'config', 'remote.origin.url', repo])
call(['git', 'pack-refs', '--all', '--prune'])
# turn refs/heads/* into refs/remotes/origin/* in the packed refs
# so that the new copy behaves more like a traditional clone.
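    # (e.g. a packed-refs entry for refs/heads/master is rewritten to
    # refs/remotes/origin/master).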
with open(os.path.join(destdir, ".git", "packed-refs"), "r") as ref_fh:
pack_lines = ref_fh.read().split("\n")
with open(os.path.join(destdir, ".git", "packed-refs"), "w") as ref_fh:
ref_fh.write(pack_lines.pop(0) + "\n")
for refline in pack_lines:
if ' refs/remotes/' in refline:
continue
if ' refs/heads/' in refline:
sha, ref = refline[:40], refline[41:]
if ref.startswith("refs/heads/"):
ref = "refs/remotes/origin/" + ref[11:]
refline = "%s %s" % (sha, ref)
ref_fh.write("%s\n" % (refline))
# Finally run a remote update to clear up the refs ready for use.
with open(os.devnull, "w") as fnull:
call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,
stderr=fnull)
def mirror(name, repo):
# try tarball first
gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))
repo_url = get_repo_url(repo)
try:
os.makedirs(gitdir)
tar_file = get_repo_name(repo_url) + '.tar'
app.log(name, 'Try fetching tarball %s' % tar_file)
with app.chdir(gitdir), open(os.devnull, "w") as fnull:
            # assumed: 'tar-url' lives in app.settings like the other config keys
            call(['wget', app.settings['tar-url']], stdout=fnull, stderr=fnull)
call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)
os.remove(tar_file)
call(['git', 'config', 'remote.origin.url', repo_url],
stdout=fnull, stderr=fnull)
call(['git', 'config', 'remote.origin.mirror', 'true'],
stdout=fnull, stderr=fnull)
if call(['git', 'config', 'remote.origin.fetch',
'+refs/*:refs/*'],
stdout=fnull, stderr=fnull) != 0:
raise BaseException('Did not get a valid git repo')
call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
except:
app.log(name, 'Using git clone from ', repo_url)
try:
with open(os.devnull, "w") as fnull:
call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],
stdout=fnull, stderr=fnull)
except:
app.log(name, 'ERROR: failed to clone', repo)
raise SystemExit
app.log(name, 'Git repo is mirrored at', gitdir)
def fetch(repo):
with app.chdir(repo), open(os.devnull, "w") as fnull:
call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)
def checkout(name, repo, ref, checkoutdir):
gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))
if not os.path.exists(gitdir):
mirror(name, repo)
app.log(name, 'Upstream version:', get_upstream_version(repo, ref))
app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))
# checkout the required version of this from git
with app.chdir(checkoutdir), open(os.devnull, "w") as fnull:
copy_repo(gitdir, checkoutdir)
if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):
app.log(name, 'ERROR: git checkout failed for', ref)
raise SystemExit
if os.path.exists('.gitmodules'):
checkout_submodules(name, ref)
utils.set_mtime_recursively(checkoutdir)
def checkout_submodules(name, ref):
app.log(name, 'Git submodules')
with open('.gitmodules', "r") as gitfile:
# drop indentation in sections, as RawConfigParser cannot handle it
content = '\n'.join([l.strip() for l in gitfile.read().splitlines()])
io = StringIO.StringIO(content)
parser = ConfigParser.RawConfigParser()
parser.readfp(io)
for section in parser.sections():
# validate section name against the 'submodule "foo"' pattern
submodule = re.sub(r'submodule "(.*)"', r'\1', section)
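        # e.g. a section named 'submodule "plugins/foo"' yields the name 'plugins/foo'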
url = parser.get(section, 'url')
path = parser.get(section, 'path')
try:
# list objects in the parent repo tree to find the commit
# object that corresponds to the submodule
commit = check_output(['git', 'ls-tree', ref, path])
# read the commit hash from the output
fields = commit.split()
if len(fields) >= 2 and fields[1] == 'commit':
submodule_commit = commit.split()[2]
# fail if the commit hash is invalid
if len(submodule_commit) != 40:
raise Exception
fulldir = os.path.join(os.getcwd(), path)
checkout(submodule, url, submodule_commit, fulldir)
else:
                app.log(name, 'Skipping submodule "%s" as %s:%s has '
                        'a non-commit object for it' % (submodule, url, path))
except:
app.log(name, "ERROR: Git submodules problem")
raise SystemExit
|
[
"#!/usr/bin/env python3\n#\n# Copyright (C) 2011-2015 Codethink Limited\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; version 2 of the License.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# =*= License: GPL-2 =*=\n\nimport os\nimport app\nimport re\nfrom subprocess import call\nfrom subprocess import check_output\nimport string\nimport definitions\nimport urllib2\nimport json\nimport utils\nimport ConfigParser\nimport StringIO\nimport re\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\ndef get_repo_name(repo):\n ''' Convert URIs to strings that only contain digits, letters, _ and %.\n\n NOTE: When changing the code of this function, make sure to also apply\n the same to the quote_url() function of lorry. Otherwise the git tarballs\n generated by lorry may no longer be found by morph.\n\n '''\n valid_chars = string.digits + string.ascii_letters + '%_'\n transl = lambda x: x if x in valid_chars else '_'\n return ''.join([transl(x) for x in get_repo_url(repo)])\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, \"w\") as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' + ref,\n '--count'])\n\n result = \"%s (%s + %s commits)\" % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + \" \" + \"(No tag found)\"\n\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = (app.settings['cache-server-url'] + 'repo='\n + get_repo_url(this['repo']) + '&ref=' + ref)\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n\n with app.chdir(gitdir), open(os.devnull, \"w\") as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n # can't resolve this ref. 
is it upstream?\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n\n except:\n # either we don't have a git dir, or ref is not unique\n # or ref does not exist\n\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n '''Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. It does not leave any given branch ready for use.\n\n '''\n\n # core.bare should be false so that git believes work trees are possible\n # we do not want the origin remote to behave as a mirror for pulls\n # we want a traditional refs/heads -> refs/remotes/origin ref mapping\n # set the origin url to the cached repo so that we can quickly clean up\n # by packing the refs, we can then edit then en-masse easily\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, \"w\") as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'],\n stdout=fnull,\n stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n\n # turn refs/heads/* into refs/remotes/origin/* in the packed refs\n # so that the new copy behaves more like a traditional clone.\n with open(os.path.join(destdir, \".git\", \"packed-refs\"), \"r\") as ref_fh:\n pack_lines = ref_fh.read().split(\"\\n\")\n with open(os.path.join(destdir, \".git\", \"packed-refs\"), \"w\") as ref_fh:\n ref_fh.write(pack_lines.pop(0) + \"\\n\")\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith(\"refs/heads/\"):\n ref = \"refs/remotes/origin/\" + ref[11:]\n refline = \"%s %s\" % (sha, ref)\n ref_fh.write(\"%s\\n\" % (refline))\n # Finally run a remote update to clear up the refs ready for use.\n with open(os.devnull, \"w\") as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n # try tarball first\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, \"w\") as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url],\n stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'],\n stdout=fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'],\n stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, \"w\") as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 
\"w\") as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\ndef checkout(name, repo, ref, checkoutdir):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n if not os.path.exists(gitdir):\n mirror(name, repo)\n app.log(name, 'Upstream version:', get_upstream_version(repo, ref))\n app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))\n # checkout the required version of this from git\n with app.chdir(checkoutdir), open(os.devnull, \"w\") as fnull:\n copy_repo(gitdir, checkoutdir)\n if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):\n app.log(name, 'ERROR: git checkout failed for', ref)\n raise SystemExit\n\n if os.path.exists('.gitmodules'):\n checkout_submodules(name, ref)\n\n utils.set_mtime_recursively(checkoutdir)\n\n\ndef checkout_submodules(name, ref):\n app.log(name, 'Git submodules')\n with open('.gitmodules', \"r\") as gitfile:\n # drop indentation in sections, as RawConfigParser cannot handle it\n content = '\\n'.join([l.strip() for l in gitfile.read().splitlines()])\n io = StringIO.StringIO(content)\n parser = ConfigParser.RawConfigParser()\n parser.readfp(io)\n\n for section in parser.sections():\n # validate section name against the 'submodule \"foo\"' pattern\n submodule = re.sub(r'submodule \"(.*)\"', r'\\1', section)\n url = parser.get(section, 'url')\n path = parser.get(section, 'path')\n\n try:\n # list objects in the parent repo tree to find the commit\n # object that corresponds to the submodule\n commit = check_output(['git', 'ls-tree', ref, path])\n\n # read the commit hash from the output\n fields = commit.split()\n if len(fields) >= 2 and fields[1] == 'commit':\n submodule_commit = commit.split()[2]\n\n # fail if the commit hash is invalid\n if len(submodule_commit) != 40:\n raise Exception\n\n fulldir = os.path.join(os.getcwd(), path)\n checkout(submodule, url, submodule_commit, fulldir)\n\n else:\n app.log(this, 'Skipping submodule \"%s\" as %s:%s has '\n 'a non-commit object for it' % (name, url))\n\n except:\n app.log(name, \"ERROR: Git submodules problem\")\n raise SystemExit\n",
"import os\nimport app\nimport re\nfrom subprocess import call\nfrom subprocess import check_output\nimport string\nimport definitions\nimport urllib2\nimport json\nimport utils\nimport ConfigParser\nimport StringIO\nimport re\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\ndef get_repo_name(repo):\n \"\"\" Convert URIs to strings that only contain digits, letters, _ and %.\n\n NOTE: When changing the code of this function, make sure to also apply\n the same to the quote_url() function of lorry. Otherwise the git tarballs\n generated by lorry may no longer be found by morph.\n\n \"\"\"\n valid_chars = string.digits + string.ascii_letters + '%_'\n transl = lambda x: x if x in valid_chars else '_'\n return ''.join([transl(x) for x in get_repo_url(repo)])\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. 
It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\ndef checkout(name, repo, ref, checkoutdir):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n if not os.path.exists(gitdir):\n mirror(name, repo)\n app.log(name, 'Upstream version:', get_upstream_version(repo, ref))\n app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))\n with app.chdir(checkoutdir), open(os.devnull, 'w') as fnull:\n copy_repo(gitdir, checkoutdir)\n if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):\n app.log(name, 'ERROR: git checkout failed for', ref)\n raise SystemExit\n if os.path.exists('.gitmodules'):\n checkout_submodules(name, ref)\n utils.set_mtime_recursively(checkoutdir)\n\n\ndef checkout_submodules(name, ref):\n app.log(name, 'Git submodules')\n with open('.gitmodules', 'r') as gitfile:\n content = '\\n'.join([l.strip() for l in gitfile.read().splitlines()])\n io = StringIO.StringIO(content)\n parser = ConfigParser.RawConfigParser()\n parser.readfp(io)\n for section in parser.sections():\n submodule = re.sub('submodule \"(.*)\"', '\\\\1', section)\n url = parser.get(section, 'url')\n path = 
parser.get(section, 'path')\n try:\n commit = check_output(['git', 'ls-tree', ref, path])\n fields = commit.split()\n if len(fields) >= 2 and fields[1] == 'commit':\n submodule_commit = commit.split()[2]\n if len(submodule_commit) != 40:\n raise Exception\n fulldir = os.path.join(os.getcwd(), path)\n checkout(submodule, url, submodule_commit, fulldir)\n else:\n app.log(this, \n 'Skipping submodule \"%s\" as %s:%s has a non-commit object for it'\n % (name, url))\n except:\n app.log(name, 'ERROR: Git submodules problem')\n raise SystemExit\n",
"<import token>\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\ndef get_repo_name(repo):\n \"\"\" Convert URIs to strings that only contain digits, letters, _ and %.\n\n NOTE: When changing the code of this function, make sure to also apply\n the same to the quote_url() function of lorry. Otherwise the git tarballs\n generated by lorry may no longer be found by morph.\n\n \"\"\"\n valid_chars = string.digits + string.ascii_letters + '%_'\n transl = lambda x: x if x in valid_chars else '_'\n return ''.join([transl(x) for x in get_repo_url(repo)])\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. 
It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\ndef checkout(name, repo, ref, checkoutdir):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n if not os.path.exists(gitdir):\n mirror(name, repo)\n app.log(name, 'Upstream version:', get_upstream_version(repo, ref))\n app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))\n with app.chdir(checkoutdir), open(os.devnull, 'w') as fnull:\n copy_repo(gitdir, checkoutdir)\n if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):\n app.log(name, 'ERROR: git checkout failed for', ref)\n raise SystemExit\n if os.path.exists('.gitmodules'):\n checkout_submodules(name, ref)\n utils.set_mtime_recursively(checkoutdir)\n\n\ndef checkout_submodules(name, ref):\n app.log(name, 'Git submodules')\n with open('.gitmodules', 'r') as gitfile:\n content = '\\n'.join([l.strip() for l in gitfile.read().splitlines()])\n io = StringIO.StringIO(content)\n parser = ConfigParser.RawConfigParser()\n parser.readfp(io)\n for section in parser.sections():\n submodule = re.sub('submodule \"(.*)\"', '\\\\1', section)\n url = parser.get(section, 'url')\n path = 
parser.get(section, 'path')\n try:\n commit = check_output(['git', 'ls-tree', ref, path])\n fields = commit.split()\n if len(fields) >= 2 and fields[1] == 'commit':\n submodule_commit = commit.split()[2]\n if len(submodule_commit) != 40:\n raise Exception\n fulldir = os.path.join(os.getcwd(), path)\n checkout(submodule, url, submodule_commit, fulldir)\n else:\n app.log(this, \n 'Skipping submodule \"%s\" as %s:%s has a non-commit object for it'\n % (name, url))\n except:\n app.log(name, 'ERROR: Git submodules problem')\n raise SystemExit\n",
"<import token>\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\n<function token>\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. 
It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\ndef checkout(name, repo, ref, checkoutdir):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n if not os.path.exists(gitdir):\n mirror(name, repo)\n app.log(name, 'Upstream version:', get_upstream_version(repo, ref))\n app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))\n with app.chdir(checkoutdir), open(os.devnull, 'w') as fnull:\n copy_repo(gitdir, checkoutdir)\n if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):\n app.log(name, 'ERROR: git checkout failed for', ref)\n raise SystemExit\n if os.path.exists('.gitmodules'):\n checkout_submodules(name, ref)\n utils.set_mtime_recursively(checkoutdir)\n\n\ndef checkout_submodules(name, ref):\n app.log(name, 'Git submodules')\n with open('.gitmodules', 'r') as gitfile:\n content = '\\n'.join([l.strip() for l in gitfile.read().splitlines()])\n io = StringIO.StringIO(content)\n parser = ConfigParser.RawConfigParser()\n parser.readfp(io)\n for section in parser.sections():\n submodule = re.sub('submodule \"(.*)\"', '\\\\1', section)\n url = parser.get(section, 'url')\n path = 
parser.get(section, 'path')\n try:\n commit = check_output(['git', 'ls-tree', ref, path])\n fields = commit.split()\n if len(fields) >= 2 and fields[1] == 'commit':\n submodule_commit = commit.split()[2]\n if len(submodule_commit) != 40:\n raise Exception\n fulldir = os.path.join(os.getcwd(), path)\n checkout(submodule, url, submodule_commit, fulldir)\n else:\n app.log(this, \n 'Skipping submodule \"%s\" as %s:%s has a non-commit object for it'\n % (name, url))\n except:\n app.log(name, 'ERROR: Git submodules problem')\n raise SystemExit\n",
"<import token>\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\n<function token>\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. 
It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\ndef checkout(name, repo, ref, checkoutdir):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n if not os.path.exists(gitdir):\n mirror(name, repo)\n app.log(name, 'Upstream version:', get_upstream_version(repo, ref))\n app.log(name, 'Git checkout %s in %s' % (repo, checkoutdir))\n with app.chdir(checkoutdir), open(os.devnull, 'w') as fnull:\n copy_repo(gitdir, checkoutdir)\n if call(['git', 'checkout', ref], stdout=fnull, stderr=fnull):\n app.log(name, 'ERROR: git checkout failed for', ref)\n raise SystemExit\n if os.path.exists('.gitmodules'):\n checkout_submodules(name, ref)\n utils.set_mtime_recursively(checkoutdir)\n\n\n<function token>\n",
"<import token>\n\n\ndef get_repo_url(repo):\n url = repo.replace('upstream:', 'git://git.baserock.org/delta/')\n url = url.replace('baserock:baserock/',\n 'git://git.baserock.org/baserock/baserock/')\n url = url.replace('freedesktop:', 'git://anongit.freedesktop.org/')\n url = url.replace('github:', 'git://github.com/')\n url = url.replace('gnome:', 'git://git.gnome.org')\n if url.endswith('.git'):\n url = url[:-4]\n return url\n\n\n<function token>\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. 
It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef get_upstream_version(repo, ref):\n try:\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n last_tag = check_output(['git', 'describe', '--abbrev=0',\n '--tags', ref], stderr=fnull)[0:-1]\n commits = check_output(['git', 'rev-list', last_tag + '..' +\n ref, '--count'])\n result = '%s (%s + %s commits)' % (ref[:8], last_tag, commits[0:-1])\n except:\n result = ref[:8] + ' ' + '(No tag found)'\n return result\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 
'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\ndef mirror(name, repo):\n gitdir = os.path.join(app.settings['gits'], get_repo_name(repo))\n repo_url = get_repo_url(repo)\n try:\n os.makedirs(gitdir)\n tar_file = get_repo_name(repo_url) + '.tar'\n app.log(name, 'Try fetching tarball %s' % tar_file)\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n call(['wget', app['tar-url']], stdout=fnull, stderr=fnull)\n call(['tar', 'xf', tar_file], stdout=fnull, stderr=fnull)\n os.remove(tar_file)\n call(['git', 'config', 'remote.origin.url', repo_url], stdout=\n fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.mirror', 'true'], stdout=\n fnull, stderr=fnull)\n if call(['git', 'config', 'remote.origin.fetch',\n '+refs/*:refs/*'], stdout=fnull, stderr=fnull) != 0:\n raise BaseException('Did not get a valid git repo')\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'Using git clone from ', repo_url)\n try:\n with open(os.devnull, 'w') as fnull:\n call(['git', 'clone', '--mirror', '-n', repo_url, gitdir],\n stdout=fnull, stderr=fnull)\n except:\n app.log(name, 'ERROR: failed to clone', repo)\n raise SystemExit\n app.log(name, 'Git repo is mirrored at', gitdir)\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 
'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\ndef copy_repo(repo, destdir):\n \"\"\"Copies a cached repository into a directory using cp.\n\n This also fixes up the repository afterwards, so that it can contain\n code etc. It does not leave any given branch ready for use.\n\n \"\"\"\n call(['cp', '-a', repo, os.path.join(destdir, '.git')])\n call(['git', 'config', 'core.bare', 'false'])\n call(['git', 'config', '--unset', 'remote.origin.mirror'])\n with open(os.devnull, 'w') as fnull:\n call(['git', 'config', 'remote.origin.fetch',\n '+refs/heads/*:refs/remotes/origin/*'], stdout=fnull, stderr=fnull)\n call(['git', 'config', 'remote.origin.url', repo])\n call(['git', 'pack-refs', '--all', '--prune'])\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'r') as ref_fh:\n pack_lines = ref_fh.read().split('\\n')\n with open(os.path.join(destdir, '.git', 'packed-refs'), 'w') as ref_fh:\n ref_fh.write(pack_lines.pop(0) + '\\n')\n for refline in pack_lines:\n if ' refs/remotes/' in refline:\n continue\n if ' refs/heads/' in refline:\n sha, ref = refline[:40], refline[41:]\n if ref.startswith('refs/heads/'):\n ref = 'refs/remotes/origin/' + ref[11:]\n refline = '%s %s' % (sha, ref)\n ref_fh.write('%s\\n' % refline)\n with open(os.devnull, 'w') as fnull:\n call(['git', 'remote', 'update', 'origin', '--prune'], stdout=fnull,\n stderr=fnull)\n\n\n<function token>\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_tree(this):\n ref = this['ref']\n gitdir = os.path.join(app.settings['gits'], get_repo_name(this['repo']))\n if not os.path.exists(gitdir):\n try:\n url = app.settings['cache-server-url'] + 'repo=' + get_repo_url(\n this['repo']) + '&ref=' + ref\n with urllib2.urlopen(url) as response:\n tree = json.loads(response.read().decode())['tree']\n return tree\n except:\n app.log(this, 'WARNING: no tree from cache-server', ref)\n mirror(this['name'], this['repo'])\n with app.chdir(gitdir), open(os.devnull, 'w') as fnull:\n if call(['git', 'rev-parse', ref + '^{object}'], stdout=fnull,\n stderr=fnull):\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n try:\n tree = check_output(['git', 'rev-parse', ref + '^{tree}'],\n universal_newlines=True)[0:-1]\n return tree\n except:\n app.log(this, 'ERROR: could not find tree for ref', ref)\n raise SystemExit\n\n\n<function token>\n<function token>\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef fetch(repo):\n with app.chdir(repo), open(os.devnull, 'w') as fnull:\n call(['git', 'fetch', 'origin'], stdout=fnull, stderr=fnull)\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
539 |
68a503b2a94304530e20d79baf9fb094024ba67e
|
from django.conf.urls import patterns, include, url
# Enable the admin:
from django.contrib import admin
admin.autodiscover()
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
from spoticle import views
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'spoticle.views.home', name='home'),
# url(r'^spoticle/', include('spoticle.foo.urls')),
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^play/$', views.index, name='play'),
url(r'^compose/$', views.compose, name='compose'),
url(r'^random/$', views.random, name='random'),
url(r'^play/(?P<pk>\d+)/$', views.DetailView.as_view(), name='quiz'),
url(r'^compose/(?P<pk>\d+)/$', views.UpdateView.as_view()),
url(r'^clip/(?P<clip_id>\d+)/$', views.clip, name='clip'),
# Auth
url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'),
url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'),
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
    # Admin documentation:
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Admin site:
    url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
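
# --- Illustrative sketch (not part of the original urls.py) ------------------
# A minimal example of how the named patterns above resolve with Django's
# reverse(); the import path matches the Django 1.x era of patterns(), and the
# primary-key values are made up for illustration.
def _example_reverse_urls():
    from django.core.urlresolvers import reverse
    return {
        'index': reverse('index'),           # -> '/'
        'quiz': reverse('quiz', args=[7]),   # -> '/play/7/'
        'clip': reverse('clip', args=[42]),  # -> '/clip/42/'
    }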
|
[
"from django.conf.urls import patterns, include, url\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\n\nfrom spoticle import views\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'spoticle.views.home', name='home'),\n # url(r'^spoticle/', include('spoticle.foo.urls')),\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^play/$', views.index, name='play'),\n url(r'^compose/$', views.compose, name='compose'),\n url(r'^random/$', views.random, name='random'),\n url(r'^play/(?P<pk>\\d+)/$', views.DetailView.as_view(), name='quiz'),\n url(r'^compose/(?P<pk>\\d+)/$', views.UpdateView.as_view()),\n\n url(r'^clip/(?P<clip_id>\\d+)/$', views.clip, name='clip'),\n\n # Auth\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', { 'template_name': 'login.html', 'extra_context': { 'next': '/' }}, name='login'),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout', { 'next_page': '/' }, name='logout'),\n\n url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n)\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom dajaxice.core import dajaxice_autodiscover, dajaxice_config\ndajaxice_autodiscover()\nfrom spoticle import views\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<import token>\nadmin.autodiscover()\n<import token>\ndajaxice_autodiscover()\n<import token>\nurlpatterns = patterns('', url('^$', views.IndexView.as_view(), name=\n 'index'), url('^play/$', views.index, name='play'), url('^compose/$',\n views.compose, name='compose'), url('^random/$', views.random, name=\n 'random'), url('^play/(?P<pk>\\\\d+)/$', views.DetailView.as_view(), name\n ='quiz'), url('^compose/(?P<pk>\\\\d+)/$', views.UpdateView.as_view()),\n url('^clip/(?P<clip_id>\\\\d+)/$', views.clip, name='clip'), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html', 'extra_context': {'next': '/'}}, name=\n 'login'), url('^accounts/logout/$', 'django.contrib.auth.views.logout',\n {'next_page': '/'}, name='logout'), url(dajaxice_config.dajaxice_url,\n include('dajaxice.urls')), url('^admin/doc/', include(\n 'django.contrib.admindocs.urls')), url('^admin/', include(admin.site.urls))\n )\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<import token>\nadmin.autodiscover()\n<import token>\ndajaxice_autodiscover()\n<import token>\n<assignment token>\nurlpatterns += staticfiles_urlpatterns()\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"<import token>\n<code token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
540 |
81dec10686b521dc9400a209caabc1601efd2a88
|
# -*- coding: utf-8 -*-
import abc
import datetime
import importlib
import inspect
import os
import re
import six
from .library import HalLibrary
@six.add_metaclass(abc.ABCMeta)
class Hal():
def __init__(self, configpath):
self.configpath = configpath
        # Find library modules inside the "libraries" directory
dir_path = os.path.join(os.path.dirname(__file__), "libraries")
lib_files = [f for f in os.listdir(dir_path) if
os.path.isfile(os.path.join(dir_path, f)) and
f.lower().endswith(".py")
]
self.responses = []
self.libraries = []
for f in lib_files:
# Try to load the module
try:
module_name = "hal.libraries." + f[:-3]
module = importlib.import_module(module_name)
for name, obj in inspect.getmembers(module):
# Find classes that inherit from HalLibrary
if inspect.isclass(obj) and issubclass(obj, HalLibrary) and \
name != "HalLibrary" and not inspect.isabstract(obj):
self.libraries.append(obj)
except:
self.add_response("Error loading library {}".format(f))
raise
def add_response(self, text):
self.responses.append(text)
def say_all(self):
response = "\n".join(self.responses)
return response
@abc.abstractmethod
    def display_help(self, help_content):
        """ Present some help information to the user """
pass
def greet(self):
hour = datetime.datetime.now().hour
greeting = "Good Evening"
if hour < 12:
greeting = 'Good morning'
elif 12 <= hour < 18:
greeting = 'Good afternoon'
self.add_response("{}. What can I help you with?".format(greeting))
def process(self, command):
"""
Process the command and get response by querying each plugin if required.
"""
self.responses = []
        if len(command) == 0:
self.greet()
return self.say_all()
# prepare the command
command = command.strip()
        # Some hard-coded patterns: if the first word is "help", activate the
        # help module
        help_regex = re.compile(r"help\s+([^\s]+)")
help_match = help_regex.match(command)
if help_match:
keyword = help_match.group(1).lower()
# Try to find libraries with the keyword and print their help
for lib in self.libraries:
if keyword in lib.keywords:
# Print the help text
help_content = lib.help()
self.display_help(help_content)
return
matched = False
for lib in self.libraries:
lib_obj = lib(command)
# try to match the command with the library
lib_obj.process_input()
if lib_obj.status == HalLibrary.SUCCESS or lib_obj.status == HalLibrary.INCOMPLETE:
matched = True
lib_obj.process()
resp = lib_obj.get_response()
for r in resp:
self.add_response(r)
elif lib_obj.status == HalLibrary.ERROR:
matched = True
self.add_response("ERROR: " + lib_obj.get_error())
else:
# Failure to match
pass
if not matched:
self.add_response("I don't understand what you're saying.")
return self.say_all()
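

# --- Illustrative sketch (not part of the original module) -------------------
# A minimal example of a library that Hal's loader above could discover if it
# were saved under hal/libraries/. It only uses the pieces of the HalLibrary
# contract that Hal.process() actually exercises (keywords, help(),
# process_input(), status, process(), get_response()); the base class's own
# internals are not shown here, so this subclass keeps its own state, and the
# constructor signature is assumed from the lib(command) call in process().
class EchoLibrary(HalLibrary):
    """Repeat back whatever follows the keyword 'echo'."""

    keywords = ["echo"]

    def __init__(self, command):
        super(EchoLibrary, self).__init__(command)
        self._command = command
        self._reply = []

    @classmethod
    def help(cls):
        # Called on the class by Hal.process() when the user types "help echo".
        return "echo <text> -- repeat <text> back to you"

    def process_input(self):
        # Hal.process() only reacts to SUCCESS/INCOMPLETE/ERROR; any other
        # status counts as "no match", so the status is only changed when the
        # command actually starts with the keyword.
        if self._command.lower().startswith("echo "):
            self.status = HalLibrary.SUCCESS

    def process(self):
        self._reply.append(self._command[len("echo "):].strip())

    def get_response(self):
        return self._reply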
|
[
"# -*- coding: utf-8 -*-\n\nimport abc\nimport datetime\nimport importlib\nimport inspect\nimport os\nimport re\nimport six\n\nfrom .library import HalLibrary\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal():\n\n def __init__(self, configpath):\n self.configpath = configpath\n # Find libraries inside the lib directory\n\n dir_path = os.path.join(os.path.dirname(__file__), \"libraries\")\n lib_files = [f for f in os.listdir(dir_path) if\n os.path.isfile(os.path.join(dir_path, f)) and\n f.lower().endswith(\".py\")\n ]\n\n self.responses = []\n self.libraries = []\n for f in lib_files:\n # Try to load the module\n try:\n module_name = \"hal.libraries.\" + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n # Find classes that inherit from HalLibrary\n if inspect.isclass(obj) and issubclass(obj, HalLibrary) and \\\n name != \"HalLibrary\" and not inspect.isabstract(obj):\n self.libraries.append(obj)\n except:\n self.add_response(\"Error loading library {}\".format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = \"\\n\".join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n\n def greet(self):\n hour = datetime.datetime.now().hour\n\n greeting = \"Good Evening\"\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n\n self.add_response(\"{}. What can I help you with?\".format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if(len(command) == 0):\n self.greet()\n return self.say_all()\n\n # prepare the command\n command = command.strip()\n\n # Some hard coded patterns: If first word is help, activate help\n # moudule\n help_regex = re.compile(\"help\\s+([^\\s]+)\")\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n # Try to find libraries with the keyword and print their help\n\n for lib in self.libraries:\n if keyword in lib.keywords:\n # Print the help text\n help_content = lib.help()\n self.display_help(help_content)\n return\n\n matched = False\n\n for lib in self.libraries:\n\n lib_obj = lib(command)\n\n # try to match the command with the library\n lib_obj.process_input()\n\n if lib_obj.status == HalLibrary.SUCCESS or lib_obj.status == HalLibrary.INCOMPLETE:\n\n matched = True\n\n lib_obj.process()\n\n resp = lib_obj.get_response()\n\n for r in resp:\n self.add_response(r)\n\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response(\"ERROR: \" + lib_obj.get_error())\n else:\n # Failure to match\n pass\n\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n\n return self.say_all()\n",
"import abc\nimport datetime\nimport importlib\nimport inspect\nimport os\nimport re\nimport six\nfrom .library import HalLibrary\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n\n def add_response(self, text):\n self.responses.append(text)\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <function token>\n\n def say_all(self):\n response = '\\n'.join(self.responses)\n return response\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <function token>\n <function token>\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n\n def greet(self):\n hour = datetime.datetime.now().hour\n greeting = 'Good Evening'\n if hour < 12:\n greeting = 'Good morning'\n elif 12 <= hour < 18:\n greeting = 'Good afternoon'\n self.add_response('{}. What can I help you with?'.format(greeting))\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <function token>\n <function token>\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n <function token>\n\n def process(self, command):\n \"\"\"\n Process the command and get response by querying each plugin if required.\n \"\"\"\n self.responses = []\n if len(command) == 0:\n self.greet()\n return self.say_all()\n command = command.strip()\n help_regex = re.compile('help\\\\s+([^\\\\s]+)')\n help_match = help_regex.match(command)\n if help_match:\n keyword = help_match.group(1).lower()\n for lib in self.libraries:\n if keyword in lib.keywords:\n help_content = lib.help()\n self.display_help(help_content)\n return\n matched = False\n for lib in self.libraries:\n lib_obj = lib(command)\n lib_obj.process_input()\n if (lib_obj.status == HalLibrary.SUCCESS or lib_obj.status ==\n HalLibrary.INCOMPLETE):\n matched = True\n lib_obj.process()\n resp = lib_obj.get_response()\n for r in resp:\n self.add_response(r)\n elif lib_obj.status == HalLibrary.ERROR:\n matched = True\n self.add_response('ERROR: ' + lib_obj.get_error())\n else:\n pass\n if not matched:\n self.add_response(\"I don't understand what you're saying.\")\n return self.say_all()\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <function token>\n <function token>\n\n @abc.abstractmethod\n def display_help(self):\n \"\"\" Present some information to the user \"\"\"\n pass\n <function token>\n <function token>\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n\n def __init__(self, configpath):\n self.configpath = configpath\n dir_path = os.path.join(os.path.dirname(__file__), 'libraries')\n lib_files = [f for f in os.listdir(dir_path) if os.path.isfile(os.\n path.join(dir_path, f)) and f.lower().endswith('.py')]\n self.responses = []\n self.libraries = []\n for f in lib_files:\n try:\n module_name = 'hal.libraries.' + f[:-3]\n module = importlib.import_module(module_name)\n for name, obj in inspect.getmembers(module):\n if inspect.isclass(obj) and issubclass(obj, HalLibrary\n ) and name != 'HalLibrary' and not inspect.isabstract(\n obj):\n self.libraries.append(obj)\n except:\n self.add_response('Error loading library {}'.format(f))\n raise\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass Hal:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
541 |
66edf0d2f7e25e166563bdb1063a1ed45ecda0e6
|
Easy = [["4 + 12 = ?", 16],
["45 -34 = ?", 11],
["27 + 12 -18 = ?", 21],
['25 - 5 * 4 = ?', 5],
["18 + 45 / 5 - 3 * 2 = ?", 21],
["5! = ?", 120],
["3! + 2! = ?", 8],
["7 + 5! / 4! - 6 / 3 = ?", 10],
["(25 + 5) / 6 * 4 = ?", 20],
["4(3+c)+c=c+4; c=?", -2],
["√121 = ?" ,11],
["x = √81 - √64; x= ?", 1],
["x + y = 20; x - y = 4; y = ?", 8]]
Normal = [["8(10−k)=2k; k = ?", 8],
["−4n−8=4(−3n+2); n=?", 2],
["4(3+c)+c=c+4; c=?", -2],
["√121 = ?" ,11],
["x = √81 - √64; x= ?", 1],
["y = √16 * √4 / √9; y=?", 2],
["y−3=2(x+1); x= -2, y=?", 1],
[" y*y = 4x/5 − 11; y= 5, x = ?", 45],
["How many unique ways are there to arrange the letters in the word CANNON?", 120],
["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
["y=−4x+6; 3x+4y=-2 ; x=?", 2],
["−x+4y=−9; y=−2x-9; y=?", -3]]
Hard = [["Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?", 20],
["You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?", 10],
["Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?", 126],
["How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?" ,67],
["You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 12],
["You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?", 2],
["y−3=2(x+1); x= -2, y=?", 48],
["How many unique ways are there to arrange the letters in the word CANNON?", 120],
["How many numbers between 1 and 100(inclusive) are divisible by 10 or 7", 23],
["−x+4y=−9; y=−2x-9; y=?", -3],
["x = √81 - √64; x= ?", 1],
["y = √16 * √4 / √9; y=?", 2],
["y−3=2(x+1); x= -2, y=?", 1],
[" y*y = 4x/5 − 11; y= 5, x = ?", 45],
["y=−4x+6; 3x+4y=-2 ; x=?", 2],
["−x+4y=−9; y=−2x-9; y=?", -3]]
|
[
"Easy = [[\"4 + 12 = ?\", 16],\r\n [\"45 -34 = ?\", 11],\r\n [\"27 + 12 -18 = ?\", 21],\r\n ['25 - 5 * 4 = ?', 5],\r\n [\"18 + 45 / 5 - 3 * 2 = ?\", 21],\r\n [\"5! = ?\", 120],\r\n [\"3! + 2! = ?\", 8],\r\n [\"7 + 5! / 4! - 6 / 3 = ?\", 10],\r\n [\"(25 + 5) / 6 * 4 = ?\", 20],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"√121 = ?\" ,11],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"x + y = 20; x - y = 4; y = ?\", 8]]\r\n\r\nNormal = [[\"8(10−k)=2k; k = ?\", 8],\r\n [\"−4n−8=4(−3n+2); n=?\", 2],\r\n [\"4(3+c)+c=c+4; c=?\", -2],\r\n [\"√121 = ?\" ,11],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"y = √16 * √4 / √9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"y=−4x+6; 3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3]]\r\n\r\nHard = [[\"Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?\", 20],\r\n [\"You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?\", 10],\r\n [\"Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?\", 126],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?\" ,67],\r\n [\"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 12],\r\n [\"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 48],\r\n [\"How many unique ways are there to arrange the letters in the word CANNON?\", 120],\r\n [\"How many numbers between 1 and 100(inclusive) are divisible by 10 or 7\", 23],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3],\r\n [\"x = √81 - √64; x= ?\", 1],\r\n [\"y = √16 * √4 / √9; y=?\", 2],\r\n [\"y−3=2(x+1); x= -2, y=?\", 1],\r\n [\" y*y = 4x/5 − 11; y= 5, x = ?\", 45],\r\n [\"y=−4x+6; 3x+4y=-2 ; x=?\", 2],\r\n [\"−x+4y=−9; y=−2x-9; y=?\", -3]]\r\n",
"Easy = [['4 + 12 = ?', 16], ['45 -34 = ?', 11], ['27 + 12 -18 = ?', 21], [\n '25 - 5 * 4 = ?', 5], ['18 + 45 / 5 - 3 * 2 = ?', 21], ['5! = ?', 120],\n ['3! + 2! = ?', 8], ['7 + 5! / 4! - 6 / 3 = ?', 10], [\n '(25 + 5) / 6 * 4 = ?', 20], ['4(3+c)+c=c+4; c=?', -2], [\n '\\u200b√\\u200b121 = ?', 11], ['x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'x + y = 20; x - y = 4; y = ?', 8]]\nNormal = [['8(10−k)=2k; k = ?', 8], ['−4n−8=4(−3n+2); n=?', 2], [\n '4(3+c)+c=c+4; c=?', -2], ['\\u200b√\\u200b121 = ?', 11], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], [\n '−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\nHard = [[\n 'Emily is packing her bags for her vacation. She has 6 unique Fabergé eggs, but only 3 fit in her bag. How many different groups of 3 Fabergé eggs can she take?'\n , 20], [\n 'You just got a free ticket for a boat ride, and you can bring along 2 friends! Unfortunately, you have 5 friends who want to come along. How many different groups of friends could you take with you?'\n , 10], [\n 'Omar is packing his bags for his vacation. He has 999 unique shirts, but only 5 fit in his bag. How many different groups of 5 shirts can he take?'\n , 126], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 3 or 2?',\n 67], [\n \"You need to put your reindeer, Gloopin, Quentin, Ezekiel, and Lancer, in a single-file line to pull your sleigh. However, Quentin and Gloopin are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 12], [\n \"You need to put your reindeer, Gloopin, Balthazar, Bloopin, Prancer, and Quentin, in a single-file line to pull your sleigh. However, Prancer and Balthazar are best friends, so you have to put them next to each other, or they won't fly. How many ways can you arrange your reindeer?\"\n , 2], ['y−3=2(x+1); x= -2, y=?', 48], [\n 'How many unique ways are there to arrange the letters in the word CANNON?'\n , 120], [\n 'How many numbers between 1 and 100(inclusive) are divisible by 10 or 7',\n 23], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3], [\n 'x = √\\u200b81 - √\\u200b64; x= ?', 1], [\n 'y = √\\u200b16 * √\\u200b4 / √\\u200b9; y=?', 2], [\n 'y−3=2(x+1); x= -2, y=?', 1], [' y*y = 4x/5 − 11; y= 5, x = ?', 45], [\n 'y=−4x+6; \\u200b3x+4y=-2 ; x=?', 2], ['−x+4y=−9; \\u200by=−2x-9; y=?', -3]]\n",
"<assignment token>\n"
] | false |
542 |
7d099012584b84e9767bf0ce9d9df1596ca3bbab
|
# Set up path references and dependencies.
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
sys.path.append(os.path.join(parentdir, "utils"))
# Import important helper libraries.
from flask import Flask, render_template
import numpy as np
import plotly
import plotly.graph_objs as pgo
import json
# Import modules created to serve the project.
#from utils import DB_interface as DBI
#from utils import path_config as pc
from utils import model
app = Flask(__name__)
# Global variable
#DAYS = 500
@app.route('/')
def index():
result_plot = compute_model_output()
return render_template("index.html", graphJSON=result_plot)
def compute_model_output():
num_steps = 500
init_inf = 5
t_inc = 5
t_inf = 9
r_t = 2.5 #np.random.normal(2.5, 1.0)
rho = 1.0
kappa_0 = 0.0
kappa = 0.0
n_pop = 2000
seir = model.SEIRModel(num_steps,n_pop, init_inf, t_inc, t_inf, r_t, rho, kappa_0, kappa)
s, e, i, r = seir.run()
days = np.linspace(0, num_steps, num_steps)
trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(color='rgba(128, 223, 255, 1)'))
trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(color='rgba(200, 100, 0, 1)'))
trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(color='rgba(180, 0, 0, 1)'))
trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(color='rgba(0, 100, 50, 1)'))
data = [trace_0, trace_1, trace_2, trace_3]
graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
return (graphJSON)
"""
@app.callback(
Output('test','children')
[Input('val_num_steps', 'num_steps')]
)
@app.route('/start_bckgrnd_update')
def start_bckgrnd_update():
p = Process(target=bckgrnd_update, name="background_update")
p.start()
#p.join()
now = datetime.now()
user = {'username': 'MSE!'}
posts = [
{
'author': {'username': 'Paul'},
'body': 'Henrik has the update just been started?'
},
{
'author': {'username': 'Henrik'},
'body': 'You bet your sweet ass it has!'
},
{
'author': {'username': 'Paul'},
'body': 'So what time was is when it started?'
},
{
'author': {'username': 'Henrik'},
'body': 'It was exactly %s !' % now
}
]
return render_template("start_bckgrnd_update.html", title="home", user = user, posts=posts)
def bckgrnd_update():
global updating
updating = True
while updating:
print(datetime.now())
print("updating RKI DBs now")
DB = DBI.DB_interface()
DB.update_RKI_csv()
DB.update_RKI_landkreise_csv()
day = 24 * 3600
time.sleep(day)
"""
if __name__ == "__main__":
app.run(debug=True)
|
[
"# Set up path references and dependencies.\nimport os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, \"utils\"))\n\n# Import important helper libraries.\nfrom flask import Flask, render_template\nimport numpy as np\n\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\n\n# Import modules created to serve the project.\n#from utils import DB_interface as DBI\n#from utils import path_config as pc\nfrom utils import model\n\napp = Flask(__name__)\n\n# Global variable\n#DAYS = 500\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template(\"index.html\", graphJSON=result_plot)\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5 #np.random.normal(2.5, 1.0)\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n\n n_pop = 2000\n\n seir = model.SEIRModel(num_steps,n_pop, init_inf, t_inc, t_inf, r_t, rho, kappa_0, kappa)\n\n s, e, i, r = seir.run()\n\n days = np.linspace(0, num_steps, num_steps)\n\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(color='rgba(0, 100, 50, 1)'))\n\n data = [trace_0, trace_1, trace_2, trace_3]\n\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n return (graphJSON)\n\n\"\"\"\[email protected](\n Output('test','children')\n [Input('val_num_steps', 'num_steps')]\n)\n\n\[email protected]('/start_bckgrnd_update')\ndef start_bckgrnd_update():\n p = Process(target=bckgrnd_update, name=\"background_update\")\n p.start()\n #p.join()\n now = datetime.now()\n user = {'username': 'MSE!'}\n posts = [\n {\n 'author': {'username': 'Paul'},\n 'body': 'Henrik has the update just been started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'You bet your sweet ass it has!'\n },\n {\n 'author': {'username': 'Paul'},\n 'body': 'So what time was is when it started?'\n },\n {\n 'author': {'username': 'Henrik'},\n 'body': 'It was exactly %s !' % now\n }\n\n ]\n return render_template(\"start_bckgrnd_update.html\", title=\"home\", user = user, posts=posts)\n\ndef bckgrnd_update():\n global updating\n updating = True\n while updating:\n print(datetime.now())\n print(\"updating RKI DBs now\")\n DB = DBI.DB_interface()\n DB.update_RKI_csv()\n DB.update_RKI_landkreise_csv()\n day = 24 * 3600\n time.sleep(day)\n\"\"\"\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n",
"import os, sys, inspect\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\nfrom flask import Flask, render_template\nimport numpy as np\nimport plotly\nimport plotly.graph_objs as pgo\nimport json\nfrom utils import model\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<docstring token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.\n currentframe())))\nparentdir = os.path.dirname(currentdir)\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<import token>\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<docstring token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\nsys.path.insert(0, parentdir)\nsys.path.append(os.path.join(parentdir, 'utils'))\n<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<docstring token>\nif __name__ == '__main__':\n app.run(debug=True)\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\ndef compute_model_output():\n num_steps = 500\n init_inf = 5\n t_inc = 5\n t_inf = 9\n r_t = 2.5\n rho = 1.0\n kappa_0 = 0.0\n kappa = 0.0\n n_pop = 2000\n seir = model.SEIRModel(num_steps, n_pop, init_inf, t_inc, t_inf, r_t,\n rho, kappa_0, kappa)\n s, e, i, r = seir.run()\n days = np.linspace(0, num_steps, num_steps)\n trace_0 = pgo.Scatter(x=days, y=s, mode='lines', name='s', line=dict(\n color='rgba(128, 223, 255, 1)'))\n trace_1 = pgo.Scatter(x=days, y=e, mode='lines', name='e', line=dict(\n color='rgba(200, 100, 0, 1)'))\n trace_2 = pgo.Scatter(x=days, y=i, mode='lines', name='i', line=dict(\n color='rgba(180, 0, 0, 1)'))\n trace_3 = pgo.Scatter(x=days, y=r, mode='lines', name='r', line=dict(\n color='rgba(0, 100, 50, 1)'))\n data = [trace_0, trace_1, trace_2, trace_3]\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\n<docstring token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\[email protected]('/')\ndef index():\n result_plot = compute_model_output()\n return render_template('index.html', graphJSON=result_plot)\n\n\n<function token>\n<docstring token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<docstring token>\n<code token>\n"
] | false |
543 |
169ad888e7629faff9509399ac7ead7a149a9602
|
import TryItYourSelf_9_8 as userObj
print('\n\n\n\n')
admin1 = userObj.Admin('john','deer',30)
admin1.describe_user()
print('\n')
admin1.set_user_name('Reven10')
print('\n')
admin1.describe_user()
admin1.privileges.show_privileges()
|
[
"import TryItYourSelf_9_8 as userObj\n\nprint('\\n\\n\\n\\n')\nadmin1 = userObj.Admin('john','deer',30)\nadmin1.describe_user()\nprint('\\n')\n\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"import TryItYourSelf_9_8 as userObj\nprint('\\n\\n\\n\\n')\nadmin1 = userObj.Admin('john', 'deer', 30)\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"<import token>\nprint('\\n\\n\\n\\n')\nadmin1 = userObj.Admin('john', 'deer', 30)\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"<import token>\nprint('\\n\\n\\n\\n')\n<assignment token>\nadmin1.describe_user()\nprint('\\n')\nadmin1.set_user_name('Reven10')\nprint('\\n')\nadmin1.describe_user()\nadmin1.privileges.show_privileges()\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
544 |
eb246beb05249f5dfde019b773698ba3bb1b1118
|
#-------------------------------------------------------------------------------
# Name: module1
# Purpose:
#
# Author: Nirvana
#
# Created: 07/06/2014
# Copyright: (c) Nirvana 2014
# Licence: <your licence>
#-------------------------------------------------------------------------------
import random
class Coin(object):
def __init__(self):
self.sideup = "Heads"
def toss(self):
if random.randint(0,1)==0:
self.sideup = "Heads"
else:
self.sideup = "Tails"
def get_sideup(self):
return self.sideup
mycoin=Coin()
print (mycoin.sideup)
print (mycoin.get_sideup())
mycoin.toss()
print (mycoin.get_sideup())
|
[
"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: Nirvana\n#\n# Created: 07/06/2014\n# Copyright: (c) Nirvana 2014\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\nimport random\n\nclass Coin(object):\n def __init__(self):\n self.sideup = \"Heads\"\n\n def toss(self):\n if random.randint(0,1)==0:\n self.sideup = \"Heads\"\n else:\n self.sideup = \"Tails\"\n\n def get_sideup(self):\n return self.sideup\n\nmycoin=Coin()\nprint (mycoin.sideup)\nprint (mycoin.get_sideup())\nmycoin.toss()\nprint (mycoin.get_sideup())\n",
"import random\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\nmycoin = Coin()\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"<import token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\nmycoin = Coin()\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"<import token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\n<assignment token>\nprint(mycoin.sideup)\nprint(mycoin.get_sideup())\nmycoin.toss()\nprint(mycoin.get_sideup())\n",
"<import token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n\n def get_sideup(self):\n return self.sideup\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass Coin(object):\n\n def __init__(self):\n self.sideup = 'Heads'\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass Coin(object):\n <function token>\n\n def toss(self):\n if random.randint(0, 1) == 0:\n self.sideup = 'Heads'\n else:\n self.sideup = 'Tails'\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass Coin(object):\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
545 |
bcc4276ea240247519cabbf5fc5646a9147ee3be
|
import sublime
import sublime_plugin
class PromptSurrounderCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel("Surround by:", "", self.on_done, None, None)
def on_done(self, tag):
try:
if self.window.active_view():
self.window.active_view().run_command("surround_by", {"tag": tag})
except ValueError:
print('hi')
class SurroundByCommand(sublime_plugin.TextCommand):
def run(self, edit, tag):
for region in self.view.sel():
text = self.view.substr(region)
self.view.replace(edit,region,"<"+tag+">"+text+"</"+tag.split()[0]+">")
|
[
"import sublime\nimport sublime_plugin\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n def run(self):\n self.window.show_input_panel(\"Surround by:\", \"\", self.on_done, None, None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command(\"surround_by\", {\"tag\": tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\tdef run(self, edit, tag):\n\t\tfor region in self.view.sel():\n\t\t\ttext = self.view.substr(region)\n\t\t\tself.view.replace(edit,region,\"<\"+tag+\">\"+text+\"</\"+tag.split()[0]+\">\")\n\n",
"import sublime\nimport sublime_plugin\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n\n def run(self):\n self.window.show_input_panel('Surround by:', '', self.on_done, None,\n None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command('surround_by', {'tag':\n tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"<import token>\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n\n def run(self):\n self.window.show_input_panel('Surround by:', '', self.on_done, None,\n None)\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command('surround_by', {'tag':\n tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"<import token>\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n <function token>\n\n def on_done(self, tag):\n try:\n if self.window.active_view():\n self.window.active_view().run_command('surround_by', {'tag':\n tag})\n except ValueError:\n print('hi')\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"<import token>\n\n\nclass PromptSurrounderCommand(sublime_plugin.WindowCommand):\n <function token>\n <function token>\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"<import token>\n<class token>\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n\n def run(self, edit, tag):\n for region in self.view.sel():\n text = self.view.substr(region)\n self.view.replace(edit, region, '<' + tag + '>' + text + '</' +\n tag.split()[0] + '>')\n",
"<import token>\n<class token>\n\n\nclass SurroundByCommand(sublime_plugin.TextCommand):\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
546 |
07783921da2fb4ae9452324f833b08b3f92ba294
|
"""
commands/map.py
description:
Generates a blank configuration file in the current directory
"""
from json import dumps
from .base_command import BaseCommand
class Map(BaseCommand):
def run(self):
from lib.models import Mapping
from lib.models import Migration
migration = Migration.load(self.options['MIGRATION_FILE'])
mapping = Mapping(self.options)
migration.mappings.append(mapping)
migration.write()
|
[
"\"\"\"\n\ncommands/map.py\n\ndescription:\n\tGenerates a blank configuration file in the current directory\n\n\"\"\"\n\nfrom json import dumps\nfrom .base_command import BaseCommand\n\nclass Map(BaseCommand):\n\tdef run(self):\n\t\tfrom lib.models import Mapping\n\t\tfrom lib.models import Migration\n\n\t\tmigration = Migration.load(self.options['MIGRATION_FILE'])\n\n\t\tmapping = Mapping(self.options)\n\n\t\tmigration.mappings.append(mapping)\n\n\t\tmigration.write()",
"<docstring token>\nfrom json import dumps\nfrom .base_command import BaseCommand\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"<docstring token>\n<import token>\n\n\nclass Map(BaseCommand):\n\n def run(self):\n from lib.models import Mapping\n from lib.models import Migration\n migration = Migration.load(self.options['MIGRATION_FILE'])\n mapping = Mapping(self.options)\n migration.mappings.append(mapping)\n migration.write()\n",
"<docstring token>\n<import token>\n\n\nclass Map(BaseCommand):\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
547 |
2f8dff78f5bc5ed18df97e2574b47f0a7711d372
|
# encoding: utf-8
"""
File: demo.py
Author: Rock Johnson
Description: 此文件为案例文件
"""
import sys
sys.path.append('../')
try:
from panicbuying.panic import Panic
except:
from panicbuying.panicbuying.panic import Panic
def main():
    '''
    Common parameters:
        store: mall or bookstore name (Xiaomi | Wenquan), browser: browser (currently only Chrome is supported),
        version: browser version number, quit: whether to quit the browser after the run finishes (default: do not quit),
        hidden: whether to show the browser UI (default: shown),
    Mall flash sale:
        url: flash-sale mall URL, addr_nth: shipping address (which saved address to pick, default: the first one),
    Bookstore e-book scraping (quit defaults to quitting, hidden defaults to disabled):
        books: {'book title': 'e-book link URL'}, path: directory for saving e-book page images (create it first if it does not exist),
        account: account, password: password,
    '''
books = {
'书名': '电子书链接地址',
}
xm = Panic(browser='Chrome', version='78.0.0', store='文泉',
books=books, path='路径', account='账号', password='密码',
)
xm.start()
if __name__ == '__main__':
main()
|
[
"# encoding: utf-8\n\n\"\"\"\nFile: demo.py\nAuthor: Rock Johnson\nDescription: 此文件为案例文件\n\"\"\"\nimport sys\n\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n '''\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n '''\n books = {\n '书名': '电子书链接地址',\n }\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉',\n books=books, path='路径', account='账号', password='密码',\n )\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\nimport sys\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\nsys.path.append('../')\ntry:\n from panicbuying.panic import Panic\nexcept:\n from panicbuying.panicbuying.panic import Panic\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n<code token>\n\n\ndef main():\n \"\"\"\n 公共参数:\n store: 商城或书店名称(小米|文泉), browser: 浏览器(目前只支持Chrome),\n version: 浏览器版本号, quit: 运行完后是否退出浏览器(默认不退出),\n hidden: 是否启用界面(默认启用),\n\n 商城抢购:\n url: 抢购商城地址, addr_nth: 收货地址(选择第几个收货地址,默认第一个),\n\n 书店扒书(quit默认退出, hidden默认不启用):\n books: {'书名': '电子书链接地址'}, path: 电子书图片保存地址(保存地址文件不存在需要先创建),\n account: 账号, password: 密码,\n \"\"\"\n books = {'书名': '电子书链接地址'}\n xm = Panic(browser='Chrome', version='78.0.0', store='文泉', books=books,\n path='路径', account='账号', password='密码')\n xm.start()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<function token>\n<code token>\n"
] | false |
548 |
03dd37346ed12bbd66cbebc46fadc37be319b986
|
import unittest
from reactivex import interval
from reactivex import operators as ops
from reactivex.testing import ReactiveTest, TestScheduler
from reactivex.testing.marbles import marbles_testing
from reactivex.testing.subscription import Subscription
on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed
on_error = ReactiveTest.on_error
subscribe = ReactiveTest.subscribe
subscribed = ReactiveTest.subscribed
disposed = ReactiveTest.disposed
created = ReactiveTest.created
class TestSwitchMapIndex(unittest.TestCase):
def test_switch_map_indexed_uses_index(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(300, "a"),
on_next(400, "b"),
on_next(500, "c"),
)
def create_inner(x: str, i: int):
def create_changing(j: int):
return (i, j, x)
return interval(20).pipe(ops.map(create_changing))
def create():
return xs.pipe(ops.switch_map_indexed(project=create_inner))
results = scheduler.start(create, disposed=580)
# (i, j, x): i is the index of the outer emit;
# j is the value of the inner interval;
# x is the value of the outer emission
assert results.messages == [
on_next(320, (0, 0, "a")),
on_next(340, (0, 1, "a")),
on_next(360, (0, 2, "a")),
on_next(380, (0, 3, "a")),
on_next(420, (1, 0, "b")),
on_next(440, (1, 1, "b")),
on_next(460, (1, 2, "b")),
on_next(480, (1, 3, "b")),
on_next(520, (2, 0, "c")),
on_next(540, (2, 1, "c")),
on_next(560, (2, 2, "c")),
]
assert xs.subscriptions == [Subscription(200, 580)]
def test_switch_map_indexed_inner_throws(self):
"""Inner throwing causes outer to throw"""
ex = "ex"
scheduler = TestScheduler()
sources = [
scheduler.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
scheduler.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
scheduler.create_cold_observable(
on_next(50, "wont happen"), on_error(120, "no")
),
]
xs = scheduler.create_hot_observable(
on_next(
250,
0,
),
on_next(400, 1),
on_next(
550,
2,
),
)
def create_inner(x: int, _i: int):
return sources[x]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(350, "a"),
on_next(450, "b"),
on_error(520, ex),
]
assert sources[0].subscriptions == [Subscription(250, 400)]
assert sources[1].subscriptions == [Subscription(400, 520)]
assert sources[2].subscriptions == []
def test_switch_map_indexed_outer_throws(self):
"""Outer throwing unsubscribes from all"""
ex = "ABC"
scheduler = TestScheduler()
sources = [
scheduler.create_cold_observable(on_next(100, "a"), on_next(300, "aa")),
scheduler.create_cold_observable(on_next(50, "b"), on_error(120, ex)),
scheduler.create_cold_observable(
on_next(50, "wont happen"), on_error(120, "no")
),
]
xs = scheduler.create_hot_observable(
on_next(
250,
0,
),
on_next(400, 1),
on_error(430, ex),
)
def create_inner(x: int, _i: int):
return sources[x]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(350, "a"),
on_error(430, ex),
]
assert sources[0].subscriptions == [Subscription(250, 400)]
assert sources[1].subscriptions == [Subscription(400, 430)]
assert sources[2].subscriptions == []
def test_switch_map_indexed_no_inner(self):
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_completed(500))
# Fake inner which should never be subscribed to
sources = [scheduler.create_cold_observable(on_next(20, 2))]
def create_inner(_x: int, i: int):
return sources[i]
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [on_completed(500)]
assert xs.subscriptions == [Subscription(200, 500)]
assert sources[0].subscriptions == []
def test_switch_map_indexed_inner_completes(self):
"""Inner completions do not affect outer"""
scheduler = TestScheduler()
xs = scheduler.create_hot_observable(
on_next(300, "d"),
on_next(330, "f"),
on_completed(540),
)
def create_inner(x: str, i: int):
"""An observable which will complete after 40 ticks"""
return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))
def create():
return xs.pipe(ops.switch_map_indexed(create_inner))
results = scheduler.start(create)
assert results.messages == [
on_next(320, (0, 0, "d")),
on_next(350, (1, 0, "f")),
on_next(
370, (1, 1, "f")
), # here the current inner is unsubscribed but not the outer
on_completed(540), # only outer completion affects
]
def test_switch_map_default_mapper(self):
with marbles_testing(timespan=10) as (start, cold, hot, exp):
xs = hot(
" ---a---b------c-----",
{
"a": cold(" --1--2", None, None),
"b": cold(" --1-2-3-4-5|", None, None),
"c": cold(" --1--2", None, None),
},
None,
)
expected = exp(" -----1---1-2-3--1--2", None, None)
result = start(xs.pipe(ops.switch_map_indexed()))
assert result == expected
|
[
"import unittest\n\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\n\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"a\"),\n on_next(400, \"b\"),\n on_next(500, \"c\"),\n )\n\n def create_inner(x: str, i: int):\n def create_changing(j: int):\n return (i, j, x)\n\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n\n results = scheduler.start(create, disposed=580)\n # (i, j, x): i is the index of the outer emit;\n # j is the value of the inner interval;\n # x is the value of the outer emission\n assert results.messages == [\n on_next(320, (0, 0, \"a\")),\n on_next(340, (0, 1, \"a\")),\n on_next(360, (0, 2, \"a\")),\n on_next(380, (0, 3, \"a\")),\n on_next(420, (1, 0, \"b\")),\n on_next(440, (1, 1, \"b\")),\n on_next(460, (1, 2, \"b\")),\n on_next(480, (1, 3, \"b\")),\n on_next(520, (2, 0, \"c\")),\n on_next(540, (2, 1, \"c\")),\n on_next(560, (2, 2, \"c\")),\n ]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = \"ex\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, \"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_next(\n 550,\n 2,\n ),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_next(450, \"b\"),\n on_error(520, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = \"ABC\"\n scheduler = TestScheduler()\n sources = [\n scheduler.create_cold_observable(on_next(100, \"a\"), on_next(300, \"aa\")),\n scheduler.create_cold_observable(on_next(50, \"b\"), on_error(120, ex)),\n scheduler.create_cold_observable(\n on_next(50, \"wont happen\"), on_error(120, \"no\")\n ),\n ]\n xs = scheduler.create_hot_observable(\n on_next(\n 250,\n 0,\n ),\n on_next(400, 1),\n on_error(430, ex),\n )\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(350, \"a\"),\n on_error(430, ex),\n ]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n 
scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n # Fake inner which should never be subscribed to\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(\n on_next(300, \"d\"),\n on_next(330, \"f\"),\n on_completed(540),\n )\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n\n results = scheduler.start(create)\n assert results.messages == [\n on_next(320, (0, 0, \"d\")),\n on_next(350, (1, 0, \"f\")),\n on_next(\n 370, (1, 1, \"f\")\n ), # here the current inner is unsubscribed but not the outer\n on_completed(540), # only outer completion affects\n ]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(\n \" ---a---b------c-----\",\n {\n \"a\": cold(\" --1--2\", None, None),\n \"b\": cold(\" --1-2-3-4-5|\", None, None),\n \"c\": cold(\" --1--2\", None, None),\n },\n None,\n )\n expected = exp(\" -----1---1-2-3--1--2\", None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"import unittest\nfrom reactivex import interval\nfrom reactivex import operators as ops\nfrom reactivex.testing import ReactiveTest, TestScheduler\nfrom reactivex.testing.marbles import marbles_testing\nfrom reactivex.testing.subscription import Subscription\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = 
scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\non_next = ReactiveTest.on_next\non_completed = ReactiveTest.on_completed\non_error = ReactiveTest.on_error\nsubscribe = ReactiveTest.subscribe\nsubscribed = ReactiveTest.subscribed\ndisposed = ReactiveTest.disposed\ncreated = ReactiveTest.created\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not 
affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n\n def test_switch_map_indexed_uses_index(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'a'), on_next(400,\n 'b'), on_next(500, 'c'))\n\n def create_inner(x: str, i: int):\n\n def create_changing(j: int):\n return i, j, x\n return interval(20).pipe(ops.map(create_changing))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(project=create_inner))\n results = scheduler.start(create, disposed=580)\n assert results.messages == [on_next(320, (0, 0, 'a')), on_next(340,\n (0, 1, 'a')), on_next(360, (0, 2, 'a')), on_next(380, (0, 3,\n 'a')), on_next(420, (1, 0, 'b')), on_next(440, (1, 1, 'b')),\n on_next(460, (1, 2, 'b')), on_next(480, (1, 3, 'b')), on_next(\n 520, (2, 0, 'c')), on_next(540, (2, 1, 'c')), on_next(560, (2, \n 2, 'c'))]\n assert xs.subscriptions == [Subscription(200, 580)]\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete 
after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n\n def test_switch_map_indexed_inner_completes(self):\n \"\"\"Inner completions do not affect outer\"\"\"\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_next(300, 'd'), on_next(330,\n 'f'), on_completed(540))\n\n def create_inner(x: str, i: int):\n \"\"\"An observable which will complete after 40 ticks\"\"\"\n return interval(20).pipe(ops.map(lambda j: (i, j, x)), ops.take(2))\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(320, (0, 0, 'd')), on_next(350,\n (1, 0, 'f')), on_next(370, (1, 1, 'f')), on_completed(540)]\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_no_inner(self):\n scheduler = TestScheduler()\n xs = scheduler.create_hot_observable(on_completed(500))\n sources = [scheduler.create_cold_observable(on_next(20, 2))]\n\n def create_inner(_x: int, i: int):\n return sources[i]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_completed(500)]\n assert xs.subscriptions == [Subscription(200, 500)]\n assert sources[0].subscriptions == []\n <function token>\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n\n def test_switch_map_indexed_outer_throws(self):\n \"\"\"Outer throwing unsubscribes from all\"\"\"\n ex = 'ABC'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_error(430, ex))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_error(430, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 430)]\n assert sources[2].subscriptions == []\n <function token>\n <function token>\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n <function token>\n <function token>\n <function token>\n\n def test_switch_map_default_mapper(self):\n with marbles_testing(timespan=10) as (start, cold, hot, exp):\n xs = hot(' ---a---b------c-----', {'a': cold(\n ' --1--2', None, None), 'b': cold(' --1-2-3-4-5|',\n None, None), 'c': cold(' --1--2', None, None)\n }, None)\n expected = exp(' -----1---1-2-3--1--2', None, None)\n result = start(xs.pipe(ops.switch_map_indexed()))\n assert result == expected\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n\n def test_switch_map_indexed_inner_throws(self):\n \"\"\"Inner throwing causes outer to throw\"\"\"\n ex = 'ex'\n scheduler = TestScheduler()\n sources = [scheduler.create_cold_observable(on_next(100, 'a'),\n on_next(300, 'aa')), scheduler.create_cold_observable(on_next(\n 50, 'b'), on_error(120, ex)), scheduler.create_cold_observable(\n on_next(50, 'wont happen'), on_error(120, 'no'))]\n xs = scheduler.create_hot_observable(on_next(250, 0), on_next(400, \n 1), on_next(550, 2))\n\n def create_inner(x: int, _i: int):\n return sources[x]\n\n def create():\n return xs.pipe(ops.switch_map_indexed(create_inner))\n results = scheduler.start(create)\n assert results.messages == [on_next(350, 'a'), on_next(450, 'b'),\n on_error(520, ex)]\n assert sources[0].subscriptions == [Subscription(250, 400)]\n assert sources[1].subscriptions == [Subscription(400, 520)]\n assert sources[2].subscriptions == []\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass TestSwitchMapIndex(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
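The record above exercises RxPY's switch_map_indexed operator through virtual-time tests. Below is a minimal standalone sketch of that testing pattern, assuming the RxPY 3.x rx.testing helpers that the record's <import token> placeholders hide; the operator under test here is plain ops.map, purely to show the TestScheduler/ReactiveTest flow.

# Virtual-time test sketch (assumes RxPY 3.x and its rx.testing module).
from rx import operators as ops
from rx.testing import ReactiveTest, TestScheduler

on_next = ReactiveTest.on_next
on_completed = ReactiveTest.on_completed

scheduler = TestScheduler()
xs = scheduler.create_hot_observable(on_next(300, 2), on_completed(500))

def create():
    return xs.pipe(ops.map(lambda x: x * 10))

results = scheduler.start(create)  # subscribes at t=200, disposes at t=1000
assert results.messages == [on_next(300, 20), on_completed(500)]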
549 |
e8a36bd7826c5d71cf8012ea82df6c127dd858fc
|
from typing import Dict, Optional
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.optim as optim
import yaml
def get_device() -> torch.device:
if torch.cuda.is_available():
return torch.device("cuda")
return torch.device("cpu")
def load_yaml_config(config_path: str) -> Dict:
with open(config_path, "r") as stream:
        return yaml.safe_load(stream)
def get_optimizer(model: nn.Module, optim_config: Dict) -> optim.Optimizer:
return optim.Adam(model.parameters(), **optim_config)
def save_checkpoint(model: nn.Module, path: str):
torch.save(model.state_dict(), path)
def load_state_dict(path: str) -> OrderedDict:
return torch.load(path)
def load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):
if checkpoint_path:
model.load_state_dict(load_state_dict(checkpoint_path))
|
[
"from typing import Dict, Optional\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\n\n\ndef get_device() -> torch.device:\n if torch.cuda.is_available():\n return torch.device(\"cuda\")\n return torch.device(\"cpu\")\n\n\ndef load_yaml_config(config_path: str) -> Dict:\n with open(config_path, \"r\") as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) -> optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) -> OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"from typing import Dict, Optional\nfrom collections import OrderedDict\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport yaml\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"<import token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\ndef load_state_dict(path: str) ->OrderedDict:\n return torch.load(path)\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"<import token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\n<function token>\n\n\ndef load_checkpoint(model: nn.Module, checkpoint_path: Optional[str]):\n if checkpoint_path:\n model.load_state_dict(load_state_dict(checkpoint_path))\n",
"<import token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\ndef save_checkpoint(model: nn.Module, path: str):\n torch.save(model.state_dict(), path)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\ndef get_optimizer(model: nn.Module, optim_config: Dict) ->optim.Optimizer:\n return optim.Adam(model.parameters(), **optim_config)\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef get_device() ->torch.device:\n if torch.cuda.is_available():\n return torch.device('cuda')\n return torch.device('cpu')\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef load_yaml_config(config_path: str) ->Dict:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
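A short usage sketch of the checkpoint helpers in the record above. The module name utils, the file names, and the optimizer settings are illustrative assumptions; it also assumes the YAML call resolves (on PyYAML >= 6, yaml.load needs an explicit Loader, so yaml.safe_load is the safer choice).

# Hypothetical usage of the helpers defined in the record above
# (module name and file names are placeholders).
import torch.nn as nn
import yaml

from utils import (get_device, get_optimizer, load_checkpoint,
                   load_yaml_config, save_checkpoint)

# Write a tiny config file so load_yaml_config has something to read.
with open("config.yaml", "w") as f:
    yaml.safe_dump({"optimizer": {"lr": 1e-3, "weight_decay": 1e-5}}, f)

model = nn.Linear(4, 2).to(get_device())            # any nn.Module works here
cfg = load_yaml_config("config.yaml")
optimizer = get_optimizer(model, cfg["optimizer"])  # Adam(lr=1e-3, weight_decay=1e-5)

save_checkpoint(model, "model.pt")   # persists model.state_dict()
load_checkpoint(model, "model.pt")   # restores it; skipped when the path is falsy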
550 |
63c214d9e831356345ba2eee68634af36964dcff
|
# Overview file
#import python classes
import numpy as np
import random as rn
import math
import matplotlib.pyplot as plt
import pylab
from mpl_toolkits.mplot3d import Axes3D
#import self produced classes
import forcemodule as fm
import init_sys
# independent parameters
dt = 0.004
N=2048
lpnum = 1000
density = 0.85
temp = 0.8
# Loading initial conditions
mom = init_sys.init_mom(N, temp)
pos, l = init_sys.init_pos(N, density)
forces = init_sys.init_forc(N)
pot = init_sys.init_pot(N)
print N, 'N'
# Iteration Verlet method
forces, pot = fm.calc_forces(pos,forces,pot,l,[N])
formersummom = 0
for lp in range(lpnum):
mom = mom + forces*0.5*dt
pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l untill 0<pos<l
forces, pot = fm.calc_forces(pos,forces,pot,l,[N])
mom = mom + forces*0.5*dt
Ken = np.sum(mom*mom*0.5, axis=1)
toten = sum(Ken) - sum(pot)
print toten, np.sum(mom)
'''
fig = pylab.figure()
ax = Axes3D(fig)
ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
'''
# Plotting the positions
|
[
"# Overview file\n\n#import python classes\nimport numpy as np\nimport random as rn\nimport math\nimport matplotlib.pyplot as plt\nimport pylab\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\n\n#import self produced classes\nimport forcemodule as fm\nimport init_sys\n\n\n# independent parameters\ndt = 0.004\nN=2048\nlpnum = 1000\ndensity = 0.85\ntemp = 0.8\n\n\n# Loading initial conditions\nmom = init_sys.init_mom(N, temp) \npos, l = init_sys.init_pos(N, density) \nforces = init_sys.init_forc(N)\npot = init_sys.init_pot(N)\n\nprint N, 'N'\n\n\n\n\n# Iteration Verlet method\n\nforces, pot = fm.calc_forces(pos,forces,pot,l,[N])\nformersummom = 0\nfor lp in range(lpnum):\n mom = mom + forces*0.5*dt\n pos = (pos + mom*dt) % l # % l means modulo of l, hence it adds/subtracts n*l untill 0<pos<l\n forces, pot = fm.calc_forces(pos,forces,pot,l,[N])\n mom = mom + forces*0.5*dt\n Ken = np.sum(mom*mom*0.5, axis=1)\n toten = sum(Ken) - sum(pot)\n print toten, np.sum(mom)\n'''\n fig = pylab.figure()\n ax = Axes3D(fig) \n ax.scatter(pos[:,0],pos[:,1],pos[:,2],c='b')\n ax.set_xlabel('X Label')\n ax.set_ylabel('Y Label')\n ax.set_zlabel('Z Label')\n plt.show()\n '''\n\n\n\n\n# Plotting the positions\n\n\n\n\n"
] | true |
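The record above is Python 2 code (bare print statements) and is flagged with error = true. Here is a compact Python 3 sketch of its velocity-Verlet loop, with the external forcemodule.calc_forces stubbed out since that module is not shown.

# Python 3 sketch of the velocity-Verlet loop in the record above.
# calc_forces is a placeholder for forcemodule.calc_forces.
import numpy as np

def calc_forces(pos, box):
    """Placeholder pair-force routine: zero forces and potential energies."""
    return np.zeros_like(pos), np.zeros(pos.shape[0])

def velocity_verlet(pos, mom, box, dt, steps):
    forces, pot = calc_forces(pos, box)
    for _ in range(steps):
        mom = mom + 0.5 * dt * forces        # first half kick
        pos = (pos + dt * mom) % box         # drift with periodic boundaries
        forces, pot = calc_forces(pos, box)  # forces at the new positions
        mom = mom + 0.5 * dt * forces        # second half kick
    kinetic = np.sum(0.5 * mom * mom)        # unit mass, as in the record
    total = kinetic - np.sum(pot)            # sign convention follows the record
    return pos, mom, total

rng = np.random.default_rng(0)
pos0 = rng.random((8, 3)) * 2.0
mom0 = rng.standard_normal((8, 3))
print(velocity_verlet(pos0, mom0, box=2.0, dt=0.004, steps=10))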
551 |
8bb39149a5b7f4f4b1d3d62a002ab97421905ea1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
处理与合约名字有关的变量
"""
import re
# 上期所
PRODUCTS_SHFE = {'cu', 'al', 'zn', 'pb', 'ni', 'sn', 'au', 'ag', 'rb', 'wr', 'hc', 'fu', 'bu', 'ru'}
# 中金所
PRODUCTS_CFFEX = {'IF', 'IC', 'IH', 'T', 'TF'}
# 郑商所
PRODUCTS_CZCE = {'SR', 'CF', 'ZC', 'FG', 'TA', 'WH', 'PM', 'RI', 'LR', 'JR', 'RS', 'OI', 'RM', 'SF', 'SM', 'MA', 'WT',
'WS', 'RO', 'ER', 'ME', 'TC'}
# 大商所
PRODUCTS_DCE = {'m', 'y', 'a', 'b', 'p', 'c', 'cs', 'jd', 'fb', 'bb', 'l', 'v', 'pp', 'j', 'jm', 'i'}
EXCHANGES_wind_code_xapi = {
'CFE': 'CFFEX',
'SHF': 'SHFE',
'CZC': 'CZCE',
'DCE': 'DCE',
'SH': 'SSE',
'SZ': 'SZSE',
}
EXCHANGES_xapi_wind_code = dict((v, k) for k, v in EXCHANGES_wind_code_xapi.items())
def product_to_exchange(product):
"""
将合约产品码转成交易所
:param product:
:return:
"""
PRODUCT_ = product.upper()
if PRODUCT_ in PRODUCTS_CFFEX:
return 'CFFEX'
if PRODUCT_ in PRODUCTS_CZCE:
return 'CZCE'
product_ = product.lower()
if product_ in PRODUCTS_SHFE:
return 'SHFE'
if product_ in PRODUCTS_DCE:
return 'DCE'
return 'Unknown'
def is_shfe(product):
"""
是否上期所
多添加此函数的主要原因是上期所需要区分平今与平昨
:param product:
:return:
"""
product_ = product.lower()
return product_ in PRODUCTS_SHFE
def get_product(symbol):
"""
从合约名中提取产品名
:param symbol:
:return:
"""
pattern = re.compile(r'(\D{1,2})(\d{0,1})(\d{3})')
match = pattern.match(symbol)
if match:
return match.expand(r'\g<1>')
else:
return symbol
def get_exchange(symbol):
"""
从带.的合约名中提取交易所
:param symbol:
:return:
"""
pattern = re.compile(r'(\.)(\D{1,4})')
match = pattern.match(symbol)
if match:
return match.expand(r'\g<2>')
else:
return symbol
if __name__ == '__main__':
import pandas as pd
df = pd.DataFrame({'Symbol': ['IF1603', 'rb1708','600000']})
df['IsSHFE'] = list(map(is_shfe, map(get_product, df['Symbol'])))
df['product'] = list(map(get_product, df['Symbol']))
df['IsSHFE'] = list(map(is_shfe, df['product']))
print(df)
print(get_product('IF1406'))
print(EXCHANGES_wind_code_xapi.get('SH'))
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n处理与合约名字有关的变量\n\"\"\"\nimport re\n\n# 上期所\nPRODUCTS_SHFE = {'cu', 'al', 'zn', 'pb', 'ni', 'sn', 'au', 'ag', 'rb', 'wr', 'hc', 'fu', 'bu', 'ru'}\n# 中金所\nPRODUCTS_CFFEX = {'IF', 'IC', 'IH', 'T', 'TF'}\n# 郑商所\nPRODUCTS_CZCE = {'SR', 'CF', 'ZC', 'FG', 'TA', 'WH', 'PM', 'RI', 'LR', 'JR', 'RS', 'OI', 'RM', 'SF', 'SM', 'MA', 'WT',\n 'WS', 'RO', 'ER', 'ME', 'TC'}\n# 大商所\nPRODUCTS_DCE = {'m', 'y', 'a', 'b', 'p', 'c', 'cs', 'jd', 'fb', 'bb', 'l', 'v', 'pp', 'j', 'jm', 'i'}\n\nEXCHANGES_wind_code_xapi = {\n 'CFE': 'CFFEX',\n 'SHF': 'SHFE',\n 'CZC': 'CZCE',\n 'DCE': 'DCE',\n 'SH': 'SSE',\n 'SZ': 'SZSE',\n}\n\nEXCHANGES_xapi_wind_code = dict((v, k) for k, v in EXCHANGES_wind_code_xapi.items())\n\n\ndef product_to_exchange(product):\n \"\"\"\n 将合约产品码转成交易所\n :param product:\n :return:\n \"\"\"\n PRODUCT_ = product.upper()\n if PRODUCT_ in PRODUCTS_CFFEX:\n return 'CFFEX'\n if PRODUCT_ in PRODUCTS_CZCE:\n return 'CZCE'\n product_ = product.lower()\n if product_ in PRODUCTS_SHFE:\n return 'SHFE'\n if product_ in PRODUCTS_DCE:\n return 'DCE'\n return 'Unknown'\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile(r'(\\D{1,2})(\\d{0,1})(\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand(r'\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile(r'(\\.)(\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand(r'\\g<2>')\n else:\n return symbol\n\n\nif __name__ == '__main__':\n import pandas as pd\n\n df = pd.DataFrame({'Symbol': ['IF1603', 'rb1708','600000']})\n df['IsSHFE'] = list(map(is_shfe, map(get_product, df['Symbol'])))\n\n df['product'] = list(map(get_product, df['Symbol']))\n df['IsSHFE'] = list(map(is_shfe, df['product']))\n print(df)\n print(get_product('IF1406'))\n print(EXCHANGES_wind_code_xapi.get('SH'))\n",
"<docstring token>\nimport re\nPRODUCTS_SHFE = {'cu', 'al', 'zn', 'pb', 'ni', 'sn', 'au', 'ag', 'rb', 'wr',\n 'hc', 'fu', 'bu', 'ru'}\nPRODUCTS_CFFEX = {'IF', 'IC', 'IH', 'T', 'TF'}\nPRODUCTS_CZCE = {'SR', 'CF', 'ZC', 'FG', 'TA', 'WH', 'PM', 'RI', 'LR', 'JR',\n 'RS', 'OI', 'RM', 'SF', 'SM', 'MA', 'WT', 'WS', 'RO', 'ER', 'ME', 'TC'}\nPRODUCTS_DCE = {'m', 'y', 'a', 'b', 'p', 'c', 'cs', 'jd', 'fb', 'bb', 'l',\n 'v', 'pp', 'j', 'jm', 'i'}\nEXCHANGES_wind_code_xapi = {'CFE': 'CFFEX', 'SHF': 'SHFE', 'CZC': 'CZCE',\n 'DCE': 'DCE', 'SH': 'SSE', 'SZ': 'SZSE'}\nEXCHANGES_xapi_wind_code = dict((v, k) for k, v in EXCHANGES_wind_code_xapi\n .items())\n\n\ndef product_to_exchange(product):\n \"\"\"\n 将合约产品码转成交易所\n :param product:\n :return:\n \"\"\"\n PRODUCT_ = product.upper()\n if PRODUCT_ in PRODUCTS_CFFEX:\n return 'CFFEX'\n if PRODUCT_ in PRODUCTS_CZCE:\n return 'CZCE'\n product_ = product.lower()\n if product_ in PRODUCTS_SHFE:\n return 'SHFE'\n if product_ in PRODUCTS_DCE:\n return 'DCE'\n return 'Unknown'\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\D{1,2})(\\\\d{0,1})(\\\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\nif __name__ == '__main__':\n import pandas as pd\n df = pd.DataFrame({'Symbol': ['IF1603', 'rb1708', '600000']})\n df['IsSHFE'] = list(map(is_shfe, map(get_product, df['Symbol'])))\n df['product'] = list(map(get_product, df['Symbol']))\n df['IsSHFE'] = list(map(is_shfe, df['product']))\n print(df)\n print(get_product('IF1406'))\n print(EXCHANGES_wind_code_xapi.get('SH'))\n",
"<docstring token>\n<import token>\nPRODUCTS_SHFE = {'cu', 'al', 'zn', 'pb', 'ni', 'sn', 'au', 'ag', 'rb', 'wr',\n 'hc', 'fu', 'bu', 'ru'}\nPRODUCTS_CFFEX = {'IF', 'IC', 'IH', 'T', 'TF'}\nPRODUCTS_CZCE = {'SR', 'CF', 'ZC', 'FG', 'TA', 'WH', 'PM', 'RI', 'LR', 'JR',\n 'RS', 'OI', 'RM', 'SF', 'SM', 'MA', 'WT', 'WS', 'RO', 'ER', 'ME', 'TC'}\nPRODUCTS_DCE = {'m', 'y', 'a', 'b', 'p', 'c', 'cs', 'jd', 'fb', 'bb', 'l',\n 'v', 'pp', 'j', 'jm', 'i'}\nEXCHANGES_wind_code_xapi = {'CFE': 'CFFEX', 'SHF': 'SHFE', 'CZC': 'CZCE',\n 'DCE': 'DCE', 'SH': 'SSE', 'SZ': 'SZSE'}\nEXCHANGES_xapi_wind_code = dict((v, k) for k, v in EXCHANGES_wind_code_xapi\n .items())\n\n\ndef product_to_exchange(product):\n \"\"\"\n 将合约产品码转成交易所\n :param product:\n :return:\n \"\"\"\n PRODUCT_ = product.upper()\n if PRODUCT_ in PRODUCTS_CFFEX:\n return 'CFFEX'\n if PRODUCT_ in PRODUCTS_CZCE:\n return 'CZCE'\n product_ = product.lower()\n if product_ in PRODUCTS_SHFE:\n return 'SHFE'\n if product_ in PRODUCTS_DCE:\n return 'DCE'\n return 'Unknown'\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\D{1,2})(\\\\d{0,1})(\\\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\nif __name__ == '__main__':\n import pandas as pd\n df = pd.DataFrame({'Symbol': ['IF1603', 'rb1708', '600000']})\n df['IsSHFE'] = list(map(is_shfe, map(get_product, df['Symbol'])))\n df['product'] = list(map(get_product, df['Symbol']))\n df['IsSHFE'] = list(map(is_shfe, df['product']))\n print(df)\n print(get_product('IF1406'))\n print(EXCHANGES_wind_code_xapi.get('SH'))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef product_to_exchange(product):\n \"\"\"\n 将合约产品码转成交易所\n :param product:\n :return:\n \"\"\"\n PRODUCT_ = product.upper()\n if PRODUCT_ in PRODUCTS_CFFEX:\n return 'CFFEX'\n if PRODUCT_ in PRODUCTS_CZCE:\n return 'CZCE'\n product_ = product.lower()\n if product_ in PRODUCTS_SHFE:\n return 'SHFE'\n if product_ in PRODUCTS_DCE:\n return 'DCE'\n return 'Unknown'\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\D{1,2})(\\\\d{0,1})(\\\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\nif __name__ == '__main__':\n import pandas as pd\n df = pd.DataFrame({'Symbol': ['IF1603', 'rb1708', '600000']})\n df['IsSHFE'] = list(map(is_shfe, map(get_product, df['Symbol'])))\n df['product'] = list(map(get_product, df['Symbol']))\n df['IsSHFE'] = list(map(is_shfe, df['product']))\n print(df)\n print(get_product('IF1406'))\n print(EXCHANGES_wind_code_xapi.get('SH'))\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\ndef product_to_exchange(product):\n \"\"\"\n 将合约产品码转成交易所\n :param product:\n :return:\n \"\"\"\n PRODUCT_ = product.upper()\n if PRODUCT_ in PRODUCTS_CFFEX:\n return 'CFFEX'\n if PRODUCT_ in PRODUCTS_CZCE:\n return 'CZCE'\n product_ = product.lower()\n if product_ in PRODUCTS_SHFE:\n return 'SHFE'\n if product_ in PRODUCTS_DCE:\n return 'DCE'\n return 'Unknown'\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\D{1,2})(\\\\d{0,1})(\\\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\ndef get_product(symbol):\n \"\"\"\n 从合约名中提取产品名\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\D{1,2})(\\\\d{0,1})(\\\\d{3})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<1>')\n else:\n return symbol\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef is_shfe(product):\n \"\"\"\n 是否上期所\n 多添加此函数的主要原因是上期所需要区分平今与平昨\n :param product:\n :return:\n \"\"\"\n product_ = product.lower()\n return product_ in PRODUCTS_SHFE\n\n\n<function token>\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_exchange(symbol):\n \"\"\"\n 从带.的合约名中提取交易所\n :param symbol:\n :return:\n \"\"\"\n pattern = re.compile('(\\\\.)(\\\\D{1,4})')\n match = pattern.match(symbol)\n if match:\n return match.expand('\\\\g<2>')\n else:\n return symbol\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
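The core of the record above is the regular expression that strips the leading product letters from a Chinese futures symbol. A standalone sketch of that extraction:

# Product-code extraction, mirroring the regex used in the record above.
import re

_PRODUCT_RE = re.compile(r'(\D{1,2})(\d{0,1})(\d{3})')

def product_of(symbol: str) -> str:
    """Return the leading product letters of a futures symbol, e.g. 'rb1708' -> 'rb'."""
    match = _PRODUCT_RE.match(symbol)
    return match.group(1) if match else symbol

print(product_of('IF1603'))   # 'IF'
print(product_of('rb1708'))   # 'rb'
print(product_of('600000'))   # '600000' (equity codes do not match, returned unchanged)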
552 |
58aa72588357b18ab42391dfffbf2a1b66589edd
|
# pyOCD debugger
# Copyright (c) 2006-2013,2018 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..family.target_kinetis import Kinetis
from ..family.flash_kinetis import Flash_Kinetis
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
FLASH_ALGO = { 'load_address' : 0x20000000,
'instructions' : [
0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
0x09032200, 0xd373428b, 0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603,
0x2200d47f, 0x428b0843, 0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03,
0x22ffd30d, 0xba120209, 0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09,
0xd301428b, 0x1ac003cb, 0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b,
0x0b034152, 0xd301428b, 0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b,
0x1ac0028b, 0x0a434152, 0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152,
0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,
0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,
0x008bd301, 0x41521ac0, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610,
0x0fcae05d, 0x4249d000, 0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 0x0a03d32d, 0xd312428b,
0x018922fc, 0x0a03ba12, 0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189,
0xe0001192, 0x09c30989, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152,
0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb,
0x08834152, 0xd301428b, 0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41,
0x46634601, 0x105b4152, 0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240,
0x46c02000, 0xbd0246c0, 0xb510480a, 0x44484908, 0xf8fcf000, 0xd1042800, 0x21004806, 0xf0004448,
0x4a05f9c9, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x6b65666b, 0x00000004, 0xf0003000, 0x4c0cb570,
0x444c4605, 0x4b0b4601, 0x68e24620, 0xf8a4f000, 0xd1052800, 0x46292300, 0x68e24620, 0xf96ef000,
0x68ca4905, 0x029b2301, 0x60ca431a, 0x0000bd70, 0x00000004, 0x6b65666b, 0xf0003000, 0x4809b510,
0x81c14907, 0x81c14908, 0x08498801, 0x80010049, 0x44484806, 0xf8f2f000, 0xd0002800, 0xbd102001,
0x0000c520, 0x40052000, 0x0000d928, 0x00000004, 0x460cb570, 0x4606460b, 0x480d4601, 0x4615b084,
0xf0004448, 0x2800f903, 0x9001d10a, 0x21019002, 0x91004807, 0x4622462b, 0x44484631, 0xf978f000,
0x68ca4904, 0x029b2301, 0x60ca431a, 0xbd70b004, 0x00000004, 0xf0003000, 0x47702000, 0xd0082800,
0xd802290f, 0xd1042a04, 0x2913e005, 0x2a08d801, 0x2004d001, 0x20004770, 0x28004770, 0x2004d101,
0xb4104770, 0x460c1e5b, 0xd101421c, 0xd002421a, 0x2065bc10, 0x68034770, 0xd804428b, 0x18896840,
0x42881818, 0xbc10d202, 0x47702066, 0x2000bc10, 0x00004770, 0x42884903, 0x206bd001, 0x20004770,
0x00004770, 0x6b65666b, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,
0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x4605b5f8, 0x460c4616,
0xf7ff4618, 0x2800ffd7, 0x2304d12b, 0x46214632, 0xf7ff4628, 0x0007ffb2, 0x19a6d123, 0x68e91e76,
0x91004630, 0xfe2cf7ff, 0xd0032900, 0x1c409e00, 0x1e764346, 0xd81342b4, 0x4478480a, 0x60046800,
0x20094909, 0xf7ff71c8, 0x4607ffbf, 0x280069a8, 0x4780d000, 0xd1032f00, 0x190468e8, 0xd9eb42b4,
0xbdf84638, 0x0000027a, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff9f, 0x2c00d106, 0x4904d005,
0x71c82044, 0xffa0f7ff, 0x2004bd10, 0x0000bd10, 0x40020000, 0xd00c2800, 0xd00a2a00, 0xd21a2908,
0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770, 0x6840e00a, 0x6880e008,
0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xd00a2800,
0x68c9490f, 0x0e094a0f, 0x447a0049, 0x03095a51, 0x2064d103, 0x20044770, 0xb4104770, 0x60032300,
0x21016041, 0x02896081, 0x490760c1, 0x158a7a0c, 0x610240a2, 0x61837ac9, 0xbc106141, 0x47704618,
0x40048040, 0x000001aa, 0x40020020, 0xd1012a00, 0x47702004, 0x461cb5ff, 0x4615b081, 0x2304460e,
0x98014622, 0xff19f7ff, 0xd1190007, 0xd0162c00, 0x4478480c, 0x600e6801, 0x6800cd02, 0x490a6041,
0x71c82006, 0xff30f7ff, 0x98014607, 0x28006980, 0x4780d000, 0xd1022f00, 0x1f241d36, 0x4638d1e8,
0xbdf0b005, 0x00000162, 0x40020000, 0xd0022800, 0x20006181, 0x20044770, 0x00004770, 0xb081b5ff,
0x460e4614, 0x23044605, 0xfee7f7ff, 0xd12a2800, 0x686868a9, 0xfd64f7ff, 0x42719000, 0x40014240,
0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a, 0xd90042a5, 0x490d4625, 0x447908a8, 0x600e6809,
0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804, 0xfeeaf7ff, 0xd1062800, 0x1b649800, 0x183f1976,
0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x000000da, 0x40020000, 0xd1012800, 0x47702004, 0x4803b510,
0x71c22240, 0xf7ff7181, 0xbd10fecf, 0x40020000, 0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615,
0x9f082304, 0xfe99f7ff, 0xd1192800, 0xd0172d00, 0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1,
0x681172c1, 0x60886820, 0xfeaef7ff, 0xd0082800, 0x29009907, 0x600ed000, 0xd0012f00, 0x60392100,
0x1f2dbdf8, 0x1d361d24, 0xd1e12d00, 0x0000bdf8, 0x00000062, 0x40020000, 0x00040002, 0x00080000,
0x00100000, 0x00200000, 0x00400000, 0x00000000, 0x00000000, 0x00200000, 0x40020004, 0x00000000,
],
'pc_init' : 0x2000027D,
'pc_unInit': 0x200002F9,
'pc_program_page': 0x200002B1,
'pc_erase_sector': 0x2000023D,
'pc_eraseAll' : 0x20000209,
'static_base' : 0x20000000 + 0x00000020 + 0x0000063c,
'begin_stack' : 0x20000000 + 0x00000800,
'begin_data' : 0x20000000 + 0x00000A00,
'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering
'min_program_length' : 4,
'analyzer_supported' : True,
'analyzer_address' : 0x1ffff800
}
class KV11Z7(Kinetis):
MEMORY_MAP = MemoryMap(
FlashRegion( start=0, length=0x20000, blocksize=0x400, is_boot_memory=True,
algo=FLASH_ALGO, flash_class=Flash_Kinetis),
RamRegion( start=0x1ffff000, length=0x4000)
)
def __init__(self, session):
super(KV11Z7, self).__init__(session, self.MEMORY_MAP)
self._svd_location = SVDFile.from_builtin("MKV11Z7.svd")
|
[
"# pyOCD debugger\n# Copyright (c) 2006-2013,2018 Arm Limited\n# SPDX-License-Identifier: Apache-2.0\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom ..family.target_kinetis import Kinetis\nfrom ..family.flash_kinetis import Flash_Kinetis\nfrom ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)\nfrom ...debug.svd.loader import SVDFile\n\nFLASH_ALGO = { 'load_address' : 0x20000000,\n 'instructions' : [\n 0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,\n 0x09032200, 0xd373428b, 0x428b0a03, 0x0b03d358, 0xd33c428b, 0x428b0c03, 0xe012d321, 0x430b4603,\n 0x2200d47f, 0x428b0843, 0x0903d374, 0xd35f428b, 0x428b0a03, 0x0b03d344, 0xd328428b, 0x428b0c03,\n 0x22ffd30d, 0xba120209, 0x428b0c03, 0x1212d302, 0xd0650209, 0x428b0b03, 0xe000d319, 0x0bc30a09,\n 0xd301428b, 0x1ac003cb, 0x0b834152, 0xd301428b, 0x1ac0038b, 0x0b434152, 0xd301428b, 0x1ac0034b,\n 0x0b034152, 0xd301428b, 0x1ac0030b, 0x0ac34152, 0xd301428b, 0x1ac002cb, 0x0a834152, 0xd301428b,\n 0x1ac0028b, 0x0a434152, 0xd301428b, 0x1ac0024b, 0x0a034152, 0xd301428b, 0x1ac0020b, 0xd2cd4152,\n 0x428b09c3, 0x01cbd301, 0x41521ac0, 0x428b0983, 0x018bd301, 0x41521ac0, 0x428b0943, 0x014bd301,\n 0x41521ac0, 0x428b0903, 0x010bd301, 0x41521ac0, 0x428b08c3, 0x00cbd301, 0x41521ac0, 0x428b0883,\n 0x008bd301, 0x41521ac0, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41, 0x41524601, 0x47704610,\n 0x0fcae05d, 0x4249d000, 0xd3001003, 0x40534240, 0x469c2200, 0x428b0903, 0x0a03d32d, 0xd312428b,\n 0x018922fc, 0x0a03ba12, 0xd30c428b, 0x11920189, 0xd308428b, 0x11920189, 0xd304428b, 0xd03a0189,\n 0xe0001192, 0x09c30989, 0xd301428b, 0x1ac001cb, 0x09834152, 0xd301428b, 0x1ac0018b, 0x09434152,\n 0xd301428b, 0x1ac0014b, 0x09034152, 0xd301428b, 0x1ac0010b, 0x08c34152, 0xd301428b, 0x1ac000cb,\n 0x08834152, 0xd301428b, 0x1ac0008b, 0xd2d94152, 0x428b0843, 0x004bd301, 0x41521ac0, 0xd2001a41,\n 0x46634601, 0x105b4152, 0xd3014610, 0x2b004240, 0x4249d500, 0x46634770, 0xd300105b, 0xb5014240,\n 0x46c02000, 0xbd0246c0, 0xb510480a, 0x44484908, 0xf8fcf000, 0xd1042800, 0x21004806, 0xf0004448,\n 0x4a05f9c9, 0x230168d1, 0x4319029b, 0xbd1060d1, 0x6b65666b, 0x00000004, 0xf0003000, 0x4c0cb570,\n 0x444c4605, 0x4b0b4601, 0x68e24620, 0xf8a4f000, 0xd1052800, 0x46292300, 0x68e24620, 0xf96ef000,\n 0x68ca4905, 0x029b2301, 0x60ca431a, 0x0000bd70, 0x00000004, 0x6b65666b, 0xf0003000, 0x4809b510,\n 0x81c14907, 0x81c14908, 0x08498801, 0x80010049, 0x44484806, 0xf8f2f000, 0xd0002800, 0xbd102001,\n 0x0000c520, 0x40052000, 0x0000d928, 0x00000004, 0x460cb570, 0x4606460b, 0x480d4601, 0x4615b084,\n 0xf0004448, 0x2800f903, 0x9001d10a, 0x21019002, 0x91004807, 0x4622462b, 0x44484631, 0xf978f000,\n 0x68ca4904, 0x029b2301, 0x60ca431a, 0xbd70b004, 0x00000004, 0xf0003000, 0x47702000, 0xd0082800,\n 0xd802290f, 0xd1042a04, 0x2913e005, 0x2a08d801, 0x2004d001, 0x20004770, 0x28004770, 0x2004d101,\n 0xb4104770, 0x460c1e5b, 0xd101421c, 0xd002421a, 0x2065bc10, 0x68034770, 0xd804428b, 0x18896840,\n 0x42881818, 0xbc10d202, 0x47702066, 0x2000bc10, 0x00004770, 0x42884903, 
0x206bd001, 0x20004770,\n 0x00004770, 0x6b65666b, 0x2170480a, 0x21807001, 0x78017001, 0xd5fc0609, 0x06817800, 0x2067d501,\n 0x06c14770, 0x2068d501, 0x07c04770, 0x2069d0fc, 0x00004770, 0x40020000, 0x4605b5f8, 0x460c4616,\n 0xf7ff4618, 0x2800ffd7, 0x2304d12b, 0x46214632, 0xf7ff4628, 0x0007ffb2, 0x19a6d123, 0x68e91e76,\n 0x91004630, 0xfe2cf7ff, 0xd0032900, 0x1c409e00, 0x1e764346, 0xd81342b4, 0x4478480a, 0x60046800,\n 0x20094909, 0xf7ff71c8, 0x4607ffbf, 0x280069a8, 0x4780d000, 0xd1032f00, 0x190468e8, 0xd9eb42b4,\n 0xbdf84638, 0x0000027a, 0x40020000, 0x4604b510, 0xf7ff4608, 0x2800ff9f, 0x2c00d106, 0x4904d005,\n 0x71c82044, 0xffa0f7ff, 0x2004bd10, 0x0000bd10, 0x40020000, 0xd00c2800, 0xd00a2a00, 0xd21a2908,\n 0x447b000b, 0x18db791b, 0x0705449f, 0x0d0b0907, 0x2004110f, 0x68c04770, 0x6840e00a, 0x6880e008,\n 0x6800e006, 0x2001e004, 0x6900e002, 0x6940e000, 0x20006010, 0x206a4770, 0x00004770, 0xd00a2800,\n 0x68c9490f, 0x0e094a0f, 0x447a0049, 0x03095a51, 0x2064d103, 0x20044770, 0xb4104770, 0x60032300,\n 0x21016041, 0x02896081, 0x490760c1, 0x158a7a0c, 0x610240a2, 0x61837ac9, 0xbc106141, 0x47704618,\n 0x40048040, 0x000001aa, 0x40020020, 0xd1012a00, 0x47702004, 0x461cb5ff, 0x4615b081, 0x2304460e,\n 0x98014622, 0xff19f7ff, 0xd1190007, 0xd0162c00, 0x4478480c, 0x600e6801, 0x6800cd02, 0x490a6041,\n 0x71c82006, 0xff30f7ff, 0x98014607, 0x28006980, 0x4780d000, 0xd1022f00, 0x1f241d36, 0x4638d1e8,\n 0xbdf0b005, 0x00000162, 0x40020000, 0xd0022800, 0x20006181, 0x20044770, 0x00004770, 0xb081b5ff,\n 0x460e4614, 0x23044605, 0xfee7f7ff, 0xd12a2800, 0x686868a9, 0xfd64f7ff, 0x42719000, 0x40014240,\n 0x42b7424f, 0x9800d101, 0x2c00183f, 0x1bbdd01a, 0xd90042a5, 0x490d4625, 0x447908a8, 0x600e6809,\n 0x2201490b, 0x0a0271ca, 0x728872ca, 0x72489804, 0xfeeaf7ff, 0xd1062800, 0x1b649800, 0x183f1976,\n 0xd1e42c00, 0xb0052000, 0x0000bdf0, 0x000000da, 0x40020000, 0xd1012800, 0x47702004, 0x4803b510,\n 0x71c22240, 0xf7ff7181, 0xbd10fecf, 0x40020000, 0xd1012b00, 0x47702004, 0x461cb5f8, 0x460e4615,\n 0x9f082304, 0xfe99f7ff, 0xd1192800, 0xd0172d00, 0x447a4a0f, 0x60066810, 0x2102480e, 0x990671c1,\n 0x681172c1, 0x60886820, 0xfeaef7ff, 0xd0082800, 0x29009907, 0x600ed000, 0xd0012f00, 0x60392100,\n 0x1f2dbdf8, 0x1d361d24, 0xd1e12d00, 0x0000bdf8, 0x00000062, 0x40020000, 0x00040002, 0x00080000,\n 0x00100000, 0x00200000, 0x00400000, 0x00000000, 0x00000000, 0x00200000, 0x40020004, 0x00000000,\n\n ],\n\n 'pc_init' : 0x2000027D,\n 'pc_unInit': 0x200002F9,\n 'pc_program_page': 0x200002B1,\n 'pc_erase_sector': 0x2000023D,\n 'pc_eraseAll' : 0x20000209,\n\n 'static_base' : 0x20000000 + 0x00000020 + 0x0000063c,\n 'begin_stack' : 0x20000000 + 0x00000800,\n 'begin_data' : 0x20000000 + 0x00000A00,\n 'page_buffers' : [0x20000a00, 0x20001200], # Enable double buffering\n 'min_program_length' : 4,\n 'analyzer_supported' : True,\n 'analyzer_address' : 0x1ffff800\n }\n\nclass KV11Z7(Kinetis):\n\n MEMORY_MAP = MemoryMap(\n FlashRegion( start=0, length=0x20000, blocksize=0x400, is_boot_memory=True,\n algo=FLASH_ALGO, flash_class=Flash_Kinetis),\n RamRegion( start=0x1ffff000, length=0x4000)\n )\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin(\"MKV11Z7.svd\")\n\n",
"from ..family.target_kinetis import Kinetis\nfrom ..family.flash_kinetis import Flash_Kinetis\nfrom ...core.memory_map import FlashRegion, RamRegion, MemoryMap\nfrom ...debug.svd.loader import SVDFile\nFLASH_ALGO = {'load_address': 536870912, 'instructions': [3758800384, \n 103643149, 604520552, 3539992640, 509886552, 474599930, 704650834, \n 1198576114, 151200256, 3547546251, 1116408323, 184800088, 3543941771, \n 1116408835, 3759330081, 1124812291, 570479743, 1116407875, 151245684, \n 3546235531, 1116408323, 184800068, 3542631051, 1116408835, 587191053, \n 3121742345, 1116408835, 303223554, 3496280585, 1116408579, 3758150425, \n 197331465, 3540075147, 448791499, 193151314, 3540075147, 448791435, \n 188957010, 3540075147, 448791371, 184762706, 3540075147, 448791307, \n 180568402, 3540075147, 448791243, 176374098, 3540075147, 448791179, \n 172179794, 3540075147, 448791115, 167985490, 3540075147, 448791051, \n 3536666962, 1116408259, 30135041, 1095899840, 1116408195, 25940737, \n 1095899840, 1116408131, 21746433, 1095899840, 1116408067, 17552129, \n 1095899840, 1116408003, 13357825, 1095899840, 1116407939, 9163521, \n 1095899840, 1116407875, 4969217, 1095899840, 3523222081, 1095910913, \n 1198540304, 264953949, 1112133632, 3539996675, 1079198272, 1184637440, \n 1116408067, 168022829, 3541189259, 25764604, 168016402, 3540796043, \n 294781321, 3540533899, 294781321, 3540271755, 3493462409, 3758100882, \n 163776905, 3540075147, 448790987, 159596882, 3540075147, 448790923, \n 155402578, 3540075147, 448790859, 151208274, 3540075147, 448790795, \n 147013970, 3540075147, 448790731, 142819666, 3540075147, 448790667, \n 3537453394, 1116407875, 4969217, 1095899840, 3523222081, 1180911105, \n 274415954, 3540076048, 721437248, 1112134912, 1180911472, 3539996763, \n 3036758592, 1186996224, 3171043008, 3037743114, 1145587976, 4177326080,\n 3506710528, 553666566, 4026549320, 1241905609, 587294929, 1125712539, \n 3171967185, 1801807467, 4, 4026544128, 1275901296, 1145849349, \n 1259030017, 1759659552, 4171558912, 3506776064, 1177101056, 1759659552,\n 4184797184, 1758087429, 43721473, 1623868186, 48496, 4, 1801807467, \n 4026544128, 1208595728, 2176928007, 2176928008, 139036673, 2147549257, \n 1145587718, 4176670720, 3489671168, 3171950593, 50464, 1074077696, \n 55592, 4, 1175238000, 1174816267, 1208829441, 1175826564, 4026549320, \n 671152387, 2416038154, 553750530, 2432714759, 1176651307, 1145587249, \n 4185452544, 1758087428, 43721473, 1623868186, 3178278916, 4, 4026544128,\n 1198530560, 3490195456, 3624020239, 3506711044, 689168389, 705222657, \n 537186305, 536889200, 671106928, 537186561, 3020965744, 1175199323, \n 3506520604, 3489808922, 543538192, 1745045360, 3624157835, 411658304, \n 1116215320, 3155218946, 1198530662, 536919056, 18288, 1116227843, \n 543936513, 536889200, 18288, 1801807467, 561006602, 562065409, \n 2013360129, 3590063625, 109148160, 543675649, 113330032, 543741185, \n 130041712, 543805692, 18288, 1073872896, 1174779384, 1175209494, \n 4160701976, 671154135, 587518251, 1176585778, 4160701992, 524210, \n 430362915, 1760108150, 2432714288, 4264359935, 3489868032, 473996800, \n 511066950, 3625140916, 1148733450, 1610901504, 537479433, 4160713160, \n 1174929343, 671115688, 1199624192, 3506646784, 419719400, 3656073908, \n 3187164728, 634, 1073872896, 1174713616, 4160701960, 671154079, \n 738251014, 1225052165, 1908940868, 4288739327, 537181456, 48400, \n 1073872896, 3490457600, 3490327040, 3524929800, 1148911627, 417036571, \n 117785759, 218827015, 537137423, 1757431664, 1749082122, 
1753276424, \n 1744887814, 536993796, 1761665026, 1765859328, 536895504, 543836016, \n 18288, 3490326528, 1758021903, 235489807, 1148846153, 50944593, \n 543478019, 537151344, 3020965744, 1610818304, 553738305, 42557569, \n 1225220289, 361396748, 1627537570, 1636006601, 3155190081, 1198540312, \n 1074036800, 426, 1073872928, 3506514432, 1198530564, 1176286719, \n 1175826561, 587482638, 2550220322, 4279891967, 3508076551, 3491113984, \n 1148733452, 1611556865, 1744882946, 1225416769, 1908940806, 4281399295,\n 2550220295, 671115648, 1199624192, 3506581248, 522460470, 1178128872, \n 3186667525, 354, 1073872896, 3489802240, 536895873, 537151344, 18288, \n 2961290751, 1175340564, 587482629, 4276615167, 3509200896, 1751673001, \n 4251252735, 1114738688, 1073824320, 1119306319, 2550190337, 738203711, \n 465424410, 3640672933, 1225606693, 1148782760, 1611556873, 570509579, \n 167932362, 1921544906, 1917360132, 4276811775, 3506841600, 459577344, \n 406788470, 3521391616, 2953125888, 48624, 218, 1073872896, 3506513920, \n 1198530564, 1208202512, 1908548160, 4160713089, 3172007631, 1073872896,\n 3506514688, 1198530564, 1176286712, 1175340565, 2668110596, 4271503359,\n 3508086784, 3491179776, 1148865039, 1611032592, 553797646, 2567336385, \n 1745973953, 1619552288, 4272879615, 3490195456, 687905031, 1611583488, \n 3489738496, 1614356736, 523091448, 490085668, 3521195264, 48632, 98, \n 1073872896, 262146, 524288, 1048576, 2097152, 4194304, 0, 0, 2097152, \n 1073872900, 0], 'pc_init': 536871549, 'pc_unInit': 536871673,\n 'pc_program_page': 536871601, 'pc_erase_sector': 536871485,\n 'pc_eraseAll': 536871433, 'static_base': 536870912 + 32 + 1596,\n 'begin_stack': 536870912 + 2048, 'begin_data': 536870912 + 2560,\n 'page_buffers': [536873472, 536875520], 'min_program_length': 4,\n 'analyzer_supported': True, 'analyzer_address': 536868864}\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"<import token>\nFLASH_ALGO = {'load_address': 536870912, 'instructions': [3758800384, \n 103643149, 604520552, 3539992640, 509886552, 474599930, 704650834, \n 1198576114, 151200256, 3547546251, 1116408323, 184800088, 3543941771, \n 1116408835, 3759330081, 1124812291, 570479743, 1116407875, 151245684, \n 3546235531, 1116408323, 184800068, 3542631051, 1116408835, 587191053, \n 3121742345, 1116408835, 303223554, 3496280585, 1116408579, 3758150425, \n 197331465, 3540075147, 448791499, 193151314, 3540075147, 448791435, \n 188957010, 3540075147, 448791371, 184762706, 3540075147, 448791307, \n 180568402, 3540075147, 448791243, 176374098, 3540075147, 448791179, \n 172179794, 3540075147, 448791115, 167985490, 3540075147, 448791051, \n 3536666962, 1116408259, 30135041, 1095899840, 1116408195, 25940737, \n 1095899840, 1116408131, 21746433, 1095899840, 1116408067, 17552129, \n 1095899840, 1116408003, 13357825, 1095899840, 1116407939, 9163521, \n 1095899840, 1116407875, 4969217, 1095899840, 3523222081, 1095910913, \n 1198540304, 264953949, 1112133632, 3539996675, 1079198272, 1184637440, \n 1116408067, 168022829, 3541189259, 25764604, 168016402, 3540796043, \n 294781321, 3540533899, 294781321, 3540271755, 3493462409, 3758100882, \n 163776905, 3540075147, 448790987, 159596882, 3540075147, 448790923, \n 155402578, 3540075147, 448790859, 151208274, 3540075147, 448790795, \n 147013970, 3540075147, 448790731, 142819666, 3540075147, 448790667, \n 3537453394, 1116407875, 4969217, 1095899840, 3523222081, 1180911105, \n 274415954, 3540076048, 721437248, 1112134912, 1180911472, 3539996763, \n 3036758592, 1186996224, 3171043008, 3037743114, 1145587976, 4177326080,\n 3506710528, 553666566, 4026549320, 1241905609, 587294929, 1125712539, \n 3171967185, 1801807467, 4, 4026544128, 1275901296, 1145849349, \n 1259030017, 1759659552, 4171558912, 3506776064, 1177101056, 1759659552,\n 4184797184, 1758087429, 43721473, 1623868186, 48496, 4, 1801807467, \n 4026544128, 1208595728, 2176928007, 2176928008, 139036673, 2147549257, \n 1145587718, 4176670720, 3489671168, 3171950593, 50464, 1074077696, \n 55592, 4, 1175238000, 1174816267, 1208829441, 1175826564, 4026549320, \n 671152387, 2416038154, 553750530, 2432714759, 1176651307, 1145587249, \n 4185452544, 1758087428, 43721473, 1623868186, 3178278916, 4, 4026544128,\n 1198530560, 3490195456, 3624020239, 3506711044, 689168389, 705222657, \n 537186305, 536889200, 671106928, 537186561, 3020965744, 1175199323, \n 3506520604, 3489808922, 543538192, 1745045360, 3624157835, 411658304, \n 1116215320, 3155218946, 1198530662, 536919056, 18288, 1116227843, \n 543936513, 536889200, 18288, 1801807467, 561006602, 562065409, \n 2013360129, 3590063625, 109148160, 543675649, 113330032, 543741185, \n 130041712, 543805692, 18288, 1073872896, 1174779384, 1175209494, \n 4160701976, 671154135, 587518251, 1176585778, 4160701992, 524210, \n 430362915, 1760108150, 2432714288, 4264359935, 3489868032, 473996800, \n 511066950, 3625140916, 1148733450, 1610901504, 537479433, 4160713160, \n 1174929343, 671115688, 1199624192, 3506646784, 419719400, 3656073908, \n 3187164728, 634, 1073872896, 1174713616, 4160701960, 671154079, \n 738251014, 1225052165, 1908940868, 4288739327, 537181456, 48400, \n 1073872896, 3490457600, 3490327040, 3524929800, 1148911627, 417036571, \n 117785759, 218827015, 537137423, 1757431664, 1749082122, 1753276424, \n 1744887814, 536993796, 1761665026, 1765859328, 536895504, 543836016, \n 18288, 3490326528, 1758021903, 235489807, 1148846153, 50944593, \n 543478019, 537151344, 3020965744, 
1610818304, 553738305, 42557569, \n 1225220289, 361396748, 1627537570, 1636006601, 3155190081, 1198540312, \n 1074036800, 426, 1073872928, 3506514432, 1198530564, 1176286719, \n 1175826561, 587482638, 2550220322, 4279891967, 3508076551, 3491113984, \n 1148733452, 1611556865, 1744882946, 1225416769, 1908940806, 4281399295,\n 2550220295, 671115648, 1199624192, 3506581248, 522460470, 1178128872, \n 3186667525, 354, 1073872896, 3489802240, 536895873, 537151344, 18288, \n 2961290751, 1175340564, 587482629, 4276615167, 3509200896, 1751673001, \n 4251252735, 1114738688, 1073824320, 1119306319, 2550190337, 738203711, \n 465424410, 3640672933, 1225606693, 1148782760, 1611556873, 570509579, \n 167932362, 1921544906, 1917360132, 4276811775, 3506841600, 459577344, \n 406788470, 3521391616, 2953125888, 48624, 218, 1073872896, 3506513920, \n 1198530564, 1208202512, 1908548160, 4160713089, 3172007631, 1073872896,\n 3506514688, 1198530564, 1176286712, 1175340565, 2668110596, 4271503359,\n 3508086784, 3491179776, 1148865039, 1611032592, 553797646, 2567336385, \n 1745973953, 1619552288, 4272879615, 3490195456, 687905031, 1611583488, \n 3489738496, 1614356736, 523091448, 490085668, 3521195264, 48632, 98, \n 1073872896, 262146, 524288, 1048576, 2097152, 4194304, 0, 0, 2097152, \n 1073872900, 0], 'pc_init': 536871549, 'pc_unInit': 536871673,\n 'pc_program_page': 536871601, 'pc_erase_sector': 536871485,\n 'pc_eraseAll': 536871433, 'static_base': 536870912 + 32 + 1596,\n 'begin_stack': 536870912 + 2048, 'begin_data': 536870912 + 2560,\n 'page_buffers': [536873472, 536875520], 'min_program_length': 4,\n 'analyzer_supported': True, 'analyzer_address': 536868864}\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"<import token>\n<assignment token>\n\n\nclass KV11Z7(Kinetis):\n MEMORY_MAP = MemoryMap(FlashRegion(start=0, length=131072, blocksize=\n 1024, is_boot_memory=True, algo=FLASH_ALGO, flash_class=\n Flash_Kinetis), RamRegion(start=536866816, length=16384))\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"<import token>\n<assignment token>\n\n\nclass KV11Z7(Kinetis):\n <assignment token>\n\n def __init__(self, session):\n super(KV11Z7, self).__init__(session, self.MEMORY_MAP)\n self._svd_location = SVDFile.from_builtin('MKV11Z7.svd')\n",
"<import token>\n<assignment token>\n\n\nclass KV11Z7(Kinetis):\n <assignment token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
553 |
18c2fe40b51ad1489d55aa2be068a1c4f381a2a5
|
import datetime
from django.db import models
from django.utils import timezone
class Acoount(models.Model):
first_name = models.CharField("Ім\'я", max_length=50)
last_name = models.CharField('Прізвище', max_length=50)
username = models.CharField('Псевдонім', max_length=50)
email = models.CharField('Електронна почта', max_length=16)
password = models.CharField('Пароль', max_length=16)
def __str__(self):
return self.first_name + ' ' + self.last_name
class Meta:
verbose_name = 'Акаунт'
verbose_name_plural = 'Акаунти'
|
[
"import datetime\nfrom django.db import models\n\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім\\'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n \n\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'",
"import datetime\nfrom django.db import models\nfrom django.utils import timezone\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"<import token>\n\n\nclass Acoount(models.Model):\n first_name = models.CharField(\"Ім'я\", max_length=50)\n last_name = models.CharField('Прізвище', max_length=50)\n username = models.CharField('Псевдонім', max_length=50)\n email = models.CharField('Електронна почта', max_length=16)\n password = models.CharField('Пароль', max_length=16)\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"<import token>\n\n\nclass Acoount(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.first_name + ' ' + self.last_name\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"<import token>\n\n\nclass Acoount(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n class Meta:\n verbose_name = 'Акаунт'\n verbose_name_plural = 'Акаунти'\n",
"<import token>\n<class token>\n"
] | false |
554 |
3f8c13be547099aa6612365452926db95828b9a0
|
from setuptools import setup
setup(name='RedHatSecurityAdvisory',
version='0.1',
description='Script that automatically checks the RedHat security advisories to see if a CVE applies',
author='Pieter-Jan Moreels',
url='https://github.com/PidgeyL/RedHat-Advisory-Checker',
entry_points={'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']},
packages=['RHSA'],
license="Modified BSD license",
)
|
[
"from setuptools import setup\n\nsetup(name='RedHatSecurityAdvisory',\n version='0.1',\n description='Script that automatically checks the RedHat security advisories to see if a CVE applies',\n author='Pieter-Jan Moreels',\n url='https://github.com/PidgeyL/RedHat-Advisory-Checker',\n entry_points={'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']},\n packages=['RHSA'],\n license=\"Modified BSD license\",\n )\n",
"from setuptools import setup\nsetup(name='RedHatSecurityAdvisory', version='0.1', description=\n 'Script that automatically checks the RedHat security advisories to see if a CVE applies'\n , author='Pieter-Jan Moreels', url=\n 'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={\n 'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[\n 'RHSA'], license='Modified BSD license')\n",
"<import token>\nsetup(name='RedHatSecurityAdvisory', version='0.1', description=\n 'Script that automatically checks the RedHat security advisories to see if a CVE applies'\n , author='Pieter-Jan Moreels', url=\n 'https://github.com/PidgeyL/RedHat-Advisory-Checker', entry_points={\n 'console_scripts': ['rhsa = RHSA:redhatAdvisory.main']}, packages=[\n 'RHSA'], license='Modified BSD license')\n",
"<import token>\n<code token>\n"
] | false |
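
The setup.py record above registers a console script through entry_points, mapping the rhsa command to RHSA:redhatAdvisory.main. A minimal sketch of what that command resolves to, assuming the RHSA package from the record is installed; the import path is taken only from the entry-point string, not from inspecting the package itself:

from RHSA import redhatAdvisory  # module and callable named in 'rhsa = RHSA:redhatAdvisory.main'

if __name__ == '__main__':
    # Calling this with no arguments is what the installed rhsa console script does.
    redhatAdvisory.main()
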
555 |
5ef65ace397be17be62625ed27b5753d15565d61
|
from abc import ABC, abstractmethod
class DatasetFileManager(ABC):
@abstractmethod
def read_dataset(self):
pass
|
[
"from abc import ABC, abstractmethod\n\n\nclass DatasetFileManager(ABC):\n @abstractmethod\n def read_dataset(self):\n pass\n",
"from abc import ABC, abstractmethod\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n",
"<import token>\n\n\nclass DatasetFileManager(ABC):\n\n @abstractmethod\n def read_dataset(self):\n pass\n",
"<import token>\n\n\nclass DatasetFileManager(ABC):\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
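
DatasetFileManager above is an abstract base class with a single abstract method, so it cannot be instantiated directly; a subclass must supply read_dataset. A small self-contained sketch (the CsvFileManager name and the CSV handling are illustrative assumptions, not part of the record):

import csv
from abc import ABC, abstractmethod


class DatasetFileManager(ABC):
    # Repeated from the record so the sketch runs on its own.

    @abstractmethod
    def read_dataset(self):
        pass


class CsvFileManager(DatasetFileManager):
    """Illustrative concrete manager that reads rows from a CSV file."""

    def __init__(self, path):
        self.path = path

    def read_dataset(self):
        with open(self.path, newline='') as handle:
            return list(csv.reader(handle))


if __name__ == '__main__':
    # DatasetFileManager() would raise TypeError; the concrete subclass works.
    manager = CsvFileManager('data.csv')
    print(type(manager).__name__)
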
556 |
236dd70dec8d53062d6c38c370cb8f11dc5ef9d0
|
import thinkbayes2 as thinkbayes
from thinkbayes2 import Pmf
import thinkplot
class Dice2(Pmf):
def __init__(self, sides):
Pmf.__init__(self)
for x in range(1, sides + 1):
self.Set(x, 1)
self.Normalize()
if __name__ == "__main__":
d6 = Dice2(6)
dices = [d6] * 6
three = thinkbayes.SampleSum(dices, 1000)
thinkplot.Pmf(three)
|
[
"import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == \"__main__\":\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"import thinkbayes2 as thinkbayes\nfrom thinkbayes2 import Pmf\nimport thinkplot\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"<import token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\nif __name__ == '__main__':\n d6 = Dice2(6)\n dices = [d6] * 6\n three = thinkbayes.SampleSum(dices, 1000)\n thinkplot.Pmf(three)\n",
"<import token>\n\n\nclass Dice2(Pmf):\n\n def __init__(self, sides):\n Pmf.__init__(self)\n for x in range(1, sides + 1):\n self.Set(x, 1)\n self.Normalize()\n\n\n<code token>\n",
"<import token>\n\n\nclass Dice2(Pmf):\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
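
The Dice2 record relies on thinkbayes2's SampleSum to approximate the distribution of a sum of dice by repeated sampling. A stdlib-only sketch of the same idea, under the assumption that each die is simply uniform over its sides (Counter stands in for a Pmf here; this is not thinkbayes2's implementation):

import random
from collections import Counter


def sample_sum(sides_per_die, n_samples=1000):
    # Approximate the pmf of the sum of several fair dice by sampling.
    totals = Counter(
        sum(random.randint(1, sides) for sides in sides_per_die)
        for _ in range(n_samples)
    )
    return {total: count / n_samples for total, count in sorted(totals.items())}


if __name__ == '__main__':
    # Six six-sided dice, mirroring dices = [d6] * 6 in the record.
    print(sample_sum([6] * 6))
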
557 |
b9bd1c0f4a5d2e6eeb75ba4f27d33ad5fb22530e
|
# coding: utf-8
"""
Upbit Open API
## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [[email protected]] # noqa: E501
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DepositCompleteResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency': 'str',
'deposit_address': 'str',
'secondary_address': 'str'
}
attribute_map = {
'currency': 'currency',
'deposit_address': 'deposit_address',
'secondary_address': 'secondary_address'
}
def __init__(self, currency=None, deposit_address=None, secondary_address=None): # noqa: E501
"""DepositCompleteResponse - a model defined in Swagger""" # noqa: E501
self._currency = None
self._deposit_address = None
self._secondary_address = None
self.discriminator = None
if currency is not None:
self.currency = currency
if deposit_address is not None:
self.deposit_address = deposit_address
if secondary_address is not None:
self.secondary_address = secondary_address
@property
def currency(self):
"""Gets the currency of this DepositCompleteResponse. # noqa: E501
화폐를 의미하는 영문 대문자 코드 # noqa: E501
:return: The currency of this DepositCompleteResponse. # noqa: E501
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this DepositCompleteResponse.
화폐를 의미하는 영문 대문자 코드 # noqa: E501
:param currency: The currency of this DepositCompleteResponse. # noqa: E501
:type: str
"""
self._currency = currency
@property
def deposit_address(self):
"""Gets the deposit_address of this DepositCompleteResponse. # noqa: E501
입금 주소 # noqa: E501
:return: The deposit_address of this DepositCompleteResponse. # noqa: E501
:rtype: str
"""
return self._deposit_address
@deposit_address.setter
def deposit_address(self, deposit_address):
"""Sets the deposit_address of this DepositCompleteResponse.
입금 주소 # noqa: E501
:param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501
:type: str
"""
self._deposit_address = deposit_address
@property
def secondary_address(self):
"""Gets the secondary_address of this DepositCompleteResponse. # noqa: E501
2차 입금 주소 # noqa: E501
:return: The secondary_address of this DepositCompleteResponse. # noqa: E501
:rtype: str
"""
return self._secondary_address
@secondary_address.setter
def secondary_address(self, secondary_address):
"""Sets the secondary_address of this DepositCompleteResponse.
2차 입금 주소 # noqa: E501
:param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501
:type: str
"""
self._secondary_address = secondary_address
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DepositCompleteResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DepositCompleteResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"# coding: utf-8\n\n\"\"\"\n Upbit Open API\n\n ## REST API for Upbit Exchange - Base URL: [https://api.upbit.com] - Official Upbit API Documents: [https://docs.upbit.com] - Official Support email: [[email protected]] # noqa: E501\n\n OpenAPI spec version: 1.0.0\n Contact: [email protected]\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass DepositCompleteResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'currency': 'str',\n 'deposit_address': 'str',\n 'secondary_address': 'str'\n }\n\n attribute_map = {\n 'currency': 'currency',\n 'deposit_address': 'deposit_address',\n 'secondary_address': 'secondary_address'\n }\n\n def __init__(self, currency=None, deposit_address=None, secondary_address=None): # noqa: E501\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\" # noqa: E501\n\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\nimport pprint\nimport re\nimport six\n\n\nclass DepositCompleteResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'currency': 'str', 'deposit_address': 'str',\n 'secondary_address': 'str'}\n attribute_map = {'currency': 'currency', 'deposit_address':\n 'deposit_address', 'secondary_address': 'secondary_address'}\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {'currency': 'str', 'deposit_address': 'str',\n 'secondary_address': 'str'}\n attribute_map = {'currency': 'currency', 'deposit_address':\n 'deposit_address', 'secondary_address': 'secondary_address'}\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n swagger_types = {'currency': 'str', 'deposit_address': 'str',\n 'secondary_address': 'str'}\n attribute_map = {'currency': 'currency', 'deposit_address':\n 'deposit_address', 'secondary_address': 'secondary_address'}\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. 
# noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DepositCompleteResponse):\n return False\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n\n @deposit_address.setter\n def deposit_address(self, deposit_address):\n \"\"\"Sets the deposit_address of this DepositCompleteResponse.\n\n 입금 주소 # noqa: E501\n\n :param deposit_address: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._deposit_address = deposit_address\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(lambda x: x.to_dict() if hasattr(x,\n 'to_dict') else x, value))\n elif hasattr(value, 'to_dict'):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(lambda item: (item[0], item[1].\n to_dict()) if hasattr(item[1], 'to_dict') else item,\n value.items()))\n else:\n result[attr] = value\n if issubclass(DepositCompleteResponse, dict):\n for key, value in self.items():\n result[key] = value\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n\n @property\n def currency(self):\n \"\"\"Gets the currency of this DepositCompleteResponse. # noqa: E501\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :return: The currency of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._currency\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n\n @property\n def secondary_address(self):\n \"\"\"Gets the secondary_address of this DepositCompleteResponse. # noqa: E501\n\n 2차 입금 주소 # noqa: E501\n\n :return: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._secondary_address\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n <function token>\n\n @secondary_address.setter\n def secondary_address(self, secondary_address):\n \"\"\"Sets the secondary_address of this DepositCompleteResponse.\n\n 2차 입금 주소 # noqa: E501\n\n :param secondary_address: The secondary_address of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._secondary_address = secondary_address\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n\n @currency.setter\n def currency(self, currency):\n \"\"\"Sets the currency of this DepositCompleteResponse.\n\n 화폐를 의미하는 영문 대문자 코드 # noqa: E501\n\n :param currency: The currency of this DepositCompleteResponse. # noqa: E501\n :type: str\n \"\"\"\n self._currency = currency\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n <function token>\n\n @property\n def deposit_address(self):\n \"\"\"Gets the deposit_address of this DepositCompleteResponse. # noqa: E501\n\n 입금 주소 # noqa: E501\n\n :return: The deposit_address of this DepositCompleteResponse. # noqa: E501\n :rtype: str\n \"\"\"\n return self._deposit_address\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n\n def __init__(self, currency=None, deposit_address=None,\n secondary_address=None):\n \"\"\"DepositCompleteResponse - a model defined in Swagger\"\"\"\n self._currency = None\n self._deposit_address = None\n self._secondary_address = None\n self.discriminator = None\n if currency is not None:\n self.currency = currency\n if deposit_address is not None:\n self.deposit_address = deposit_address\n if secondary_address is not None:\n self.secondary_address = secondary_address\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n\n\nclass DepositCompleteResponse(object):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
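
A short usage sketch for the generated DepositCompleteResponse model above: construct it with keyword arguments and serialize it with to_dict() / to_str(). It assumes the class from the record (and its six dependency) is importable in the current environment; the field values below are placeholders, not real Upbit data:

# Placeholder values for illustration only.
response = DepositCompleteResponse(
    currency='BTC',
    deposit_address='example-deposit-address',
    secondary_address=None,
)
print(response.to_dict())  # {'currency': 'BTC', 'deposit_address': 'example-deposit-address', 'secondary_address': None}
print(response)            # __repr__ delegates to to_str(), i.e. pprint of the same dict
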
558 |
d1dc807ecc92d9108db2c9bd00ee9781e174a1aa
|
from datetime import datetime
from django.core import mail
from entity_event import context_loader
from entity_emailer.models import Email
from entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \
create_email_message, extract_email_subject_from_html_content
class EntityEmailerInterface(object):
"""
An api interface to do things within entity emailer
"""
@staticmethod
def send_unsent_scheduled_emails():
"""
Send out any scheduled emails that are unsent
"""
current_time = datetime.utcnow()
email_medium = get_medium()
to_send = Email.objects.filter(
scheduled__lte=current_time,
sent__isnull=True
).select_related(
'event'
).prefetch_related(
'recipients'
)
# Fetch the contexts of every event so that they may be rendered
context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])
emails = []
for email in to_send:
to_email_addresses = get_subscribed_email_addresses(email)
if to_email_addresses:
text_message, html_message = email.render(email_medium)
message = create_email_message(
to_emails=to_email_addresses,
from_email=email.from_address or get_from_email_address(),
subject=email.subject or extract_email_subject_from_html_content(html_message),
text=text_message,
html=html_message,
)
emails.append(message)
connection = mail.get_connection()
connection.send_messages(emails)
to_send.update(sent=current_time)
@staticmethod
def convert_events_to_emails():
"""
Converts unseen events to emails and marks them as seen.
"""
# Get the email medium
email_medium = get_medium()
# Get the default from email
default_from_email = get_from_email_address()
# Find any unseen events and create unsent email objects
for event, targets in email_medium.events_targets(seen=False, mark_seen=True):
# Check the event's context for a from_address, otherwise fallback to default
from_address = event.context.get('from_address') or default_from_email
# Create the emails
Email.objects.create_email(event=event, from_address=from_address, recipients=targets)
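# (Added usage note, not part of the original module: both entry points are
#  staticmethods, so a periodic task would typically run them back to back, e.g.
#      EntityEmailerInterface.convert_events_to_emails()
#      EntityEmailerInterface.send_unsent_scheduled_emails()
#  converting newly seen events into Email rows first, then sending whatever is due.)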
|
[
"from datetime import datetime\n\nfrom django.core import mail\nfrom entity_event import context_loader\n\nfrom entity_emailer.models import Email\n\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, \\\n create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(\n scheduled__lte=current_time,\n sent__isnull=True\n ).select_related(\n 'event'\n ).prefetch_related(\n 'recipients'\n )\n\n # Fetch the contexts of every event so that they may be rendered\n context_loader.load_contexts_and_renderers([e.event for e in to_send], [email_medium])\n\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(\n to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address(),\n subject=email.subject or extract_email_subject_from_html_content(html_message),\n text=text_message,\n html=html_message,\n )\n emails.append(message)\n\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n\n # Get the email medium\n email_medium = get_medium()\n\n # Get the default from email\n default_from_email = get_from_email_address()\n\n # Find any unseen events and create unsent email objects\n for event, targets in email_medium.events_targets(seen=False, mark_seen=True):\n\n # Check the event's context for a from_address, otherwise fallback to default\n from_address = event.context.get('from_address') or default_from_email\n\n # Create the emails\n Email.objects.create_email(event=event, from_address=from_address, recipients=targets)\n",
"from datetime import datetime\nfrom django.core import mail\nfrom entity_event import context_loader\nfrom entity_emailer.models import Email\nfrom entity_emailer.utils import get_medium, get_from_email_address, get_subscribed_email_addresses, create_email_message, extract_email_subject_from_html_content\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"<import token>\n\n\nclass EntityEmailerInterface(object):\n \"\"\"\n An api interface to do things within entity emailer\n \"\"\"\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"<import token>\n\n\nclass EntityEmailerInterface(object):\n <docstring token>\n\n @staticmethod\n def send_unsent_scheduled_emails():\n \"\"\"\n Send out any scheduled emails that are unsent\n \"\"\"\n current_time = datetime.utcnow()\n email_medium = get_medium()\n to_send = Email.objects.filter(scheduled__lte=current_time,\n sent__isnull=True).select_related('event').prefetch_related(\n 'recipients')\n context_loader.load_contexts_and_renderers([e.event for e in\n to_send], [email_medium])\n emails = []\n for email in to_send:\n to_email_addresses = get_subscribed_email_addresses(email)\n if to_email_addresses:\n text_message, html_message = email.render(email_medium)\n message = create_email_message(to_emails=to_email_addresses,\n from_email=email.from_address or get_from_email_address\n (), subject=email.subject or\n extract_email_subject_from_html_content(html_message),\n text=text_message, html=html_message)\n emails.append(message)\n connection = mail.get_connection()\n connection.send_messages(emails)\n to_send.update(sent=current_time)\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"<import token>\n\n\nclass EntityEmailerInterface(object):\n <docstring token>\n <function token>\n\n @staticmethod\n def convert_events_to_emails():\n \"\"\"\n Converts unseen events to emails and marks them as seen.\n \"\"\"\n email_medium = get_medium()\n default_from_email = get_from_email_address()\n for event, targets in email_medium.events_targets(seen=False,\n mark_seen=True):\n from_address = event.context.get('from_address'\n ) or default_from_email\n Email.objects.create_email(event=event, from_address=\n from_address, recipients=targets)\n",
"<import token>\n\n\nclass EntityEmailerInterface(object):\n <docstring token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
559 |
7a1be5c9c48413ba1969631e99ecb45cf15ef613
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
('Registration', '0015_auto_20150525_1815'),
]
operations = [
migrations.AlterField(
model_name='user',
name='created_date',
field=models.DateField(auto_now_add=True),
),
migrations.AlterField(
model_name='user',
name='last_login',
field=models.DateTimeField(null=True, verbose_name='last login', blank=True),
),
migrations.AlterField(
model_name='user',
name='modified_date',
field=models.DateField(auto_now=True),
),
migrations.AlterField(
model_name='user_skills',
name='percentage',
field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
]
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('Registration', '0015_auto_20150525_1815'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='user',\n name='created_date',\n field=models.DateField(auto_now_add=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='last_login',\n field=models.DateTimeField(null=True, verbose_name='last login', blank=True),\n ),\n migrations.AlterField(\n model_name='user',\n name='modified_date',\n field=models.DateField(auto_now=True),\n ),\n migrations.AlterField(\n model_name='user_skills',\n name='percentage',\n field=models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.core.validators\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('Registration', '0015_auto_20150525_1815')]\n operations = [migrations.AlterField(model_name='user', name=\n 'created_date', field=models.DateField(auto_now_add=True)),\n migrations.AlterField(model_name='user', name='last_login', field=\n models.DateTimeField(null=True, verbose_name='last login', blank=\n True)), migrations.AlterField(model_name='user', name=\n 'modified_date', field=models.DateField(auto_now=True)), migrations\n .AlterField(model_name='user_skills', name='percentage', field=\n models.PositiveSmallIntegerField(default=0, validators=[django.core\n .validators.MinValueValidator(0), django.core.validators.\n MaxValueValidator(100)]))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
560 |
34b23e80b3c4aaf62f31c19fee0b47ace1561a8c
|
# cor = input('Escolha uma cor: ')
# print(f"Cor escolhida {cor:=^10}\n"
# f"Cor escolhida {cor:>10}\n"
# f"Cor escolhida {cor:<10}\n")
n1 = 7
n2 = 3
#print(f'Soma {n1+n2}')
s = n1 + n2
m = n1 * n2
d = n1 / n2
di = n1 // n2
e = n1 ** n2
print(f's = {s}\n m = {m}\n d = {d:.2f}\n di = {di}\n e = {e}', end='')
|
[
"# cor = input('Escolha uma cor: ')\n# print(f\"Cor escolhida {cor:=^10}\\n\"\n # f\"Cor escolhida {cor:>10}\\n\"\n # f\"Cor escolhida {cor:<10}\\n\")\n\nn1 = 7\nn2 = 3\n\n#print(f'Soma {n1+n2}')\n\ns = n1 + n2\nm = n1 * n2\nd = n1 / n2\ndi = n1 // n2\ne = n1 ** n2\nprint(f's = {s}\\n m = {m}\\n d = {d:.2f}\\n di = {di}\\n e = {e}', end='')\n",
"n1 = 7\nn2 = 3\ns = n1 + n2\nm = n1 * n2\nd = n1 / n2\ndi = n1 // n2\ne = n1 ** n2\nprint(f\"\"\"s = {s}\n m = {m}\n d = {d:.2f}\n di = {di}\n e = {e}\"\"\", end='')\n",
"<assignment token>\nprint(f\"\"\"s = {s}\n m = {m}\n d = {d:.2f}\n di = {di}\n e = {e}\"\"\", end='')\n",
"<assignment token>\n<code token>\n"
] | false |
561 |
c52d1c187edb17e85a8e2b47aa6731bc9a41ab1b
|
print ("Hello Workls!")
|
[
"print (\"Hello Workls!\")\n",
"print('Hello Workls!')\n",
"<code token>\n"
] | false |
562 |
de3a4053b5b0d4d2d5c2dcd317e64cf9b4faeb75
|
from urllib.parse import quote
from top_model import db
from top_model.ext.flask import FlaskTopModel
from top_model.filesystem import ProductPhotoCIP
from top_model.webstore import Product, Labo
from unrest import UnRest
class Hydra(FlaskTopModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.config['CLIENT_ID'] = 4
self.config['BASE_IMAGE_URL'] = (
'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')
self.config['SQLALCHEMY_DATABASE_URI'] = (
'pgfdw://hydra@localhost/hydra')
self.config.from_envvar('MEDBOX_SETTINGS', silent=True)
self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])
def filter_query(query):
return query.filter_by(client_id=app.config['CLIENT_ID'])
app = Hydra(__name__)
rest = UnRest(app, db.session)
rest(Labo, only=('label',))
product_api = rest(Product, query=filter_query, only=(
'product_id', 'title', 'description', 'cip', 'resip_labo_code',
'type_product'))
image_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))
@image_api.declare('GET')
def get_image(payload, cip, name, ext):
result = image_api.get(payload, cip=cip)
for obj in getattr(result, 'data', result)['objects']:
obj['name'] = quote(obj['name'])
obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)
return result
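# (Added illustration with made-up values: for an object with cip='3400930000000',
#  name='front' and ext='jpg', the BASE_IMAGE_URL template above yields
#  'https://static.pharminfo.fr/images/cip/3400930000000/front.jpg'.)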
@product_api.declare('GET')
def get_product(payload, product_id):
products = (
Product.query
.filter_by(cip=str(product_id))
.filter_by(client_id=app.config['CLIENT_ID'])
.all())
if products:
return product_api.get(payload, product_id=products[0].product_id)
else:
return {'objects': [], 'occurences': 0}
|
[
"from urllib.parse import quote\n\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'] = (\n 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}')\n self.config['SQLALCHEMY_DATABASE_URI'] = (\n 'pgfdw://hydra@localhost/hydra')\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\n\n\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=(\n 'product_id', 'title', 'description', 'cip', 'resip_labo_code',\n 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = (\n Product.query\n .filter_by(cip=str(product_id))\n .filter_by(client_id=app.config['CLIENT_ID'])\n .all())\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"from urllib.parse import quote\nfrom top_model import db\nfrom top_model.ext.flask import FlaskTopModel\nfrom top_model.filesystem import ProductPhotoCIP\nfrom top_model.webstore import Product, Labo\nfrom unrest import UnRest\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\napp = Hydra(__name__)\nrest = UnRest(app, db.session)\nrest(Labo, only=('label',))\nproduct_api = rest(Product, query=filter_query, only=('product_id', 'title',\n 'description', 'cip', 'resip_labo_code', 'type_product'))\nimage_api = rest(ProductPhotoCIP, only=('cip', 'name', 'ext'))\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<assignment token>\nrest(Labo, only=('label',))\n<assignment token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<assignment token>\n<code token>\n<assignment token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n@product_api.declare('GET')\ndef get_product(payload, product_id):\n products = Product.query.filter_by(cip=str(product_id)).filter_by(client_id\n =app.config['CLIENT_ID']).all()\n if products:\n return product_api.get(payload, product_id=products[0].product_id)\n else:\n return {'objects': [], 'occurences': 0}\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<assignment token>\n<code token>\n<assignment token>\n\n\n@image_api.declare('GET')\ndef get_image(payload, cip, name, ext):\n result = image_api.get(payload, cip=cip)\n for obj in getattr(result, 'data', result)['objects']:\n obj['name'] = quote(obj['name'])\n obj['url'] = app.config['BASE_IMAGE_URL'].format(**obj)\n return result\n\n\n<function token>\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\ndef filter_query(query):\n return query.filter_by(client_id=app.config['CLIENT_ID'])\n\n\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.config['CLIENT_ID'] = 4\n self.config['BASE_IMAGE_URL'\n ] = 'https://static.pharminfo.fr/images/cip/{cip}/{name}.{ext}'\n self.config['SQLALCHEMY_DATABASE_URI'\n ] = 'pgfdw://hydra@localhost/hydra'\n self.config.from_envvar('MEDBOX_SETTINGS', silent=True)\n self.configure_db(self.config['SQLALCHEMY_DATABASE_URI'])\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass Hydra(FlaskTopModel):\n <function token>\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
563 |
5b91b7025b0e574d45f95a0585128018d83c17ea
|
something1
x = session.query(x).filter(y).count()
something2
y = session.query(
models.User, models.X,
).filter(
models.User.time > start_time,
models.User.id == user_id,
).count()
def something3():
x = session.query(
models.Review,
).filter(
models.Review.time < end_time,
).count()
something4
x = session.query(x, y).filter(bla).count()
x = session.query(x.X, y).filter(y > user_id).count()
x = session.query(
x.X, y.Y
).filter(x.X == 5).count()
something5
|
[
"something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(\n models.User, models.X,\n).filter(\n models.User.time > start_time,\n models.User.id == user_id,\n).count()\ndef something3():\n x = session.query(\n models.Review,\n ).filter(\n models.Review.time < end_time,\n ).count()\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(\n x.X, y.Y\n).filter(x.X == 5).count()\nsomething5\n",
"something1\nx = session.query(x).filter(y).count()\nsomething2\ny = session.query(models.User, models.X).filter(models.User.time >\n start_time, models.User.id == user_id).count()\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\nx = session.query(x, y).filter(bla).count()\nx = session.query(x.X, y).filter(y > user_id).count()\nx = session.query(x.X, y.Y).filter(x.X == 5).count()\nsomething5\n",
"something1\n<assignment token>\nsomething2\n<assignment token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\nsomething4\n<assignment token>\nsomething5\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef something3():\n x = session.query(models.Review).filter(models.Review.time < end_time\n ).count()\n\n\n<code token>\n<assignment token>\n<code token>\n",
"<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
564 |
fe12f6d3408ab115c5c440c5b45a9014cfee6539
|
from django.urls import path, include
from . import views

urlpatterns = [
    path('', views.home, name='home'),
    path('category/', include('api.category.urls')),
    path('product/', include('api.product.urls')),
    path('user/', include('api.user.urls')),
    path('order/', include('api.order.urls')),
    path('payment/', include('api.payment.urls')),
]
|
[
"from django.urls import path,include\nfrom .import views\n\nurlpatterns = [\n path('',views.home,name='home'),\n path('category/',include('api.category.urls')),\n path('product/',include('api.product.urls')),\n path('user/',include('api.user.urls')),\n path('order/',include('api.order.urls')),\n path('payment/',include('api.payment.urls')),\n]\n",
"from django.urls import path, include\nfrom . import views\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"<import token>\nurlpatterns = [path('', views.home, name='home'), path('category/', include\n ('api.category.urls')), path('product/', include('api.product.urls')),\n path('user/', include('api.user.urls')), path('order/', include(\n 'api.order.urls')), path('payment/', include('api.payment.urls'))]\n",
"<import token>\n<assignment token>\n"
] | false |
565 |
bb7910af5334641fd2db7146112afaff7a2e42b9
|
#!/usr/bin/env python
# coding: utf-8
from __future__ import print_function
import uuid
import cgi
import squareconnect
from squareconnect.rest import ApiException
from squareconnect.apis.transactions_api import TransactionsApi
from squareconnect.apis.locations_api import LocationsApi
from squareconnect.apis.customers_api import CustomersApi
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
nonce = form.getvalue('nonce')
# Get amount data
donation = form.getvalue('amount')
boxChecked = form.getvalue('boxChecked')
firstName = form.getvalue('firstname')
lastName = form.getvalue('lastname')
email = form.getvalue('email')
# The access token to use in all Connect API requests. Use your *sandbox* access
# token if you're just testing things out.
squareconnect.configuration.access_token = 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ'
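# (Added note: outside of testing, the token would normally come from the
#  environment rather than being hard-coded, e.g.
#      import os
#      squareconnect.configuration.access_token = os.environ['SQUARE_ACCESS_TOKEN']
#  where SQUARE_ACCESS_TOKEN is a hypothetical variable name.)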
# The ID of the business location to associate processed payments with.
# See [Retrieve your business's locations]
# (https://docs.connect.squareup.com/articles/getting-started/#retrievemerchantprofile)
# for an easy way to get your business's location IDs.
# If you're testing things out, use a sandbox location ID.
location_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'
transactions_api_instance = TransactionsApi()
customers_api_instance = CustomersApi()
# Every payment you process with the SDK must have a unique idempotency key.
# If you're unsure whether a particular payment succeeded, you can reattempt
# it with the same idempotency key without worrying about double charging
# the buyer.
idempotency_key = str(uuid.uuid1())
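# (Added illustration: because retries reuse the key, an ambiguous failure can be
#  retried by resending the identical request body, e.g.
#      try:
#          api_response = transactions_api_instance.charge(location_id, body)
#      except ApiException:
#          # same body -> same idempotency_key -> the buyer is not double charged
#          api_response = transactions_api_instance.charge(location_id, body)
#  while each genuinely new payment gets a fresh uuid1() as above.)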
# Monetary amounts are specified in the smallest unit of the applicable currency.
# This amount is in cents; here it is computed from the donation value submitted with the form.
amount = {'amount': int(donation) * 100, 'currency': 'USD'}
customersList = []
# Add a customer to file
if boxChecked == "true":
heading = "Recurring Donation"
customerRequest = {'given_name': firstName, 'family_name': lastName, 'email_address': email}
try:
customerResponse = customers_api_instance.create_customer(customerRequest)
except ApiException as e:
print ("customer creation failed")
print (e)
exit()
customer = customerResponse.customer
customerCardRequest = {'card_nonce': nonce}
try:
customerCardResponse = customers_api_instance.create_customer_card(customer.id, customerCardRequest)
except:
print ("customer card creation failed")
exit()
customerCard = customerCardResponse.card
body = {'customer_id': customer.id, 'customer_card_id': customerCard.id, 'idempotency_key': idempotency_key, 'amount_money': amount}
customersList = customers_api_instance.list_customers()
else:
# To learn more about splitting transactions with additional recipients,
# see the Transactions API documentation on our [developer site]
# (https://docs.connect.squareup.com/payments/transactions/overview#mpt-overview).
heading = "One time Donation"
body = {'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount}
# customersList = Non
# The SDK throws an exception if a Connect endpoint responds with anything besides
# a 200-level HTTP code. This block catches any exceptions that occur from the request.
try:
api_response = transactions_api_instance.charge(location_id, body)
res = api_response.transaction
except ApiException as e:
res = "Exception when calling TransactionApi->charge: {}".format(e)
# Display the result
print ('Content-type:text/html\r\n\r\n')
print ('<html>')
print ('<head>')
print ('<title>Square Payment</title>')
print ('</head>')
print ('<body>')
print ('<h2>Result: </h2>')
print( '<h2>{}</h2>'.format(heading))
print ('<p>{}</p>'.format(res))
if customersList:
print( '<h2>Customers stored on File: </h2>')
for customer in customersList.customers:
print ('<p>{}</p>'.format(customer))
print ('</body>')
print ('</html>')
|
[
"#!/usr/bin/env python\n# coding: utf-8\nfrom __future__ import print_function\nimport uuid\nimport cgi\n\nimport squareconnect\nfrom squareconnect.rest import ApiException\nfrom squareconnect.apis.transactions_api import TransactionsApi\nfrom squareconnect.apis.locations_api import LocationsApi\nfrom squareconnect.apis.customers_api import CustomersApi\n\n# Create instance of FieldStorage\nform = cgi.FieldStorage()\n\n# Get data from fields\nnonce = form.getvalue('nonce')\n# Get amount data\ndonation = form.getvalue('amount')\n\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\n\n\n# The access token to use in all Connect API requests. Use your *sandbox* access\n# token if you're just testing things out.\nsquareconnect.configuration.access_token = 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ'\n\n# The ID of the business location to associate processed payments with.\n# See [Retrieve your business's locations]\n# (https://docs.connect.squareup.com/articles/getting-started/#retrievemerchantprofile)\n# for an easy way to get your business's location IDs.\n# If you're testing things out, use a sandbox location ID.\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\n\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\n\n# Every payment you process with the SDK must have a unique idempotency key.\n# If you're unsure whether a particular payment succeeded, you can reattempt\n# it with the same idempotency key without worrying about double charging\n# the buyer.\nidempotency_key = str(uuid.uuid1())\n\n# Monetary amounts are specified in the smallest unit of the applicable currency.\n# This amount is in cents. It's also hard-coded for $1.00, which isn't very useful.\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\n\ncustomersList = []\n\n# Add a customer to file\nif boxChecked == \"true\": \n\theading = \"Recurring Donation\"\n\tcustomerRequest = {'given_name': firstName, 'family_name': lastName, 'email_address': email}\n\n\ttry:\n\t\tcustomerResponse = customers_api_instance.create_customer(customerRequest)\n\texcept ApiException as e:\n\t\tprint (\"customer creation failed\")\n\t\tprint (e)\n\t\texit()\n\n\tcustomer = customerResponse.customer\n\tcustomerCardRequest = {'card_nonce': nonce}\n\n\ttry:\n\t\tcustomerCardResponse = customers_api_instance.create_customer_card(customer.id, customerCardRequest)\n\texcept:\n\t\tprint (\"customer card creation failed\")\n\t\texit()\n\n\tcustomerCard = customerCardResponse.card\n\n\tbody = {'customer_id': customer.id, 'customer_card_id': customerCard.id, 'idempotency_key': idempotency_key, 'amount_money': amount}\n\tcustomersList = customers_api_instance.list_customers()\nelse:\n\t# To learn more about splitting transactions with additional recipients,\n\t# see the Transactions API documentation on our [developer site]\n\t# (https://docs.connect.squareup.com/payments/transactions/overview#mpt-overview).\n\theading = \"One time Donation\"\n\tbody = {'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount}\n\t# customersList = Non\n\n\n# The SDK throws an exception if a Connect endpoint responds with anything besides\n# a 200-level HTTP code. 
This block catches any exceptions that occur from the request.\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = \"Exception when calling TransactionApi->charge: {}\".format(e)\n\n# Display the result\nprint ('Content-type:text/html\\r\\n\\r\\n')\nprint ('<html>')\nprint ('<head>')\nprint ('<title>Square Payment</title>')\nprint ('</head>')\nprint ('<body>')\nprint ('<h2>Result: </h2>')\nprint( '<h2>{}</h2>'.format(heading))\nprint ('<p>{}</p>'.format(res))\nif customersList:\n\tprint( '<h2>Customers stored on File: </h2>')\n\tfor customer in customersList.customers:\n\t\tprint ('<p>{}</p>'.format(customer))\n\nprint ('</body>')\nprint ('</html>')\n",
"from __future__ import print_function\nimport uuid\nimport cgi\nimport squareconnect\nfrom squareconnect.rest import ApiException\nfrom squareconnect.apis.transactions_api import TransactionsApi\nfrom squareconnect.apis.locations_api import LocationsApi\nfrom squareconnect.apis.customers_api import CustomersApi\nform = cgi.FieldStorage()\nnonce = form.getvalue('nonce')\ndonation = form.getvalue('amount')\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\nsquareconnect.configuration.access_token = (\n 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ')\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\nidempotency_key = str(uuid.uuid1())\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\ncustomersList = []\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"<import token>\nform = cgi.FieldStorage()\nnonce = form.getvalue('nonce')\ndonation = form.getvalue('amount')\nboxChecked = form.getvalue('boxChecked')\nfirstName = form.getvalue('firstname')\nlastName = form.getvalue('lastname')\nemail = form.getvalue('email')\nsquareconnect.configuration.access_token = (\n 'sandbox-sq0atb-kfvpHvEa9Mz2098Nozk1RQ')\nlocation_id = 'CBASEGLb1fOhVH4Uvvi1aY_bOawgAQ'\ntransactions_api_instance = TransactionsApi()\ncustomers_api_instance = CustomersApi()\nidempotency_key = str(uuid.uuid1())\namount = {'amount': int(donation) * 100, 'currency': 'USD'}\ncustomersList = []\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"<import token>\n<assignment token>\nif boxChecked == 'true':\n heading = 'Recurring Donation'\n customerRequest = {'given_name': firstName, 'family_name': lastName,\n 'email_address': email}\n try:\n customerResponse = customers_api_instance.create_customer(\n customerRequest)\n except ApiException as e:\n print('customer creation failed')\n print(e)\n exit()\n customer = customerResponse.customer\n customerCardRequest = {'card_nonce': nonce}\n try:\n customerCardResponse = customers_api_instance.create_customer_card(\n customer.id, customerCardRequest)\n except:\n print('customer card creation failed')\n exit()\n customerCard = customerCardResponse.card\n body = {'customer_id': customer.id, 'customer_card_id': customerCard.id,\n 'idempotency_key': idempotency_key, 'amount_money': amount}\n customersList = customers_api_instance.list_customers()\nelse:\n heading = 'One time Donation'\n body = {'idempotency_key': idempotency_key, 'card_nonce': nonce,\n 'amount_money': amount}\ntry:\n api_response = transactions_api_instance.charge(location_id, body)\n res = api_response.transaction\nexcept ApiException as e:\n res = 'Exception when calling TransactionApi->charge: {}'.format(e)\nprint('Content-type:text/html\\r\\n\\r\\n')\nprint('<html>')\nprint('<head>')\nprint('<title>Square Payment</title>')\nprint('</head>')\nprint('<body>')\nprint('<h2>Result: </h2>')\nprint('<h2>{}</h2>'.format(heading))\nprint('<p>{}</p>'.format(res))\nif customersList:\n print('<h2>Customers stored on File: </h2>')\n for customer in customersList.customers:\n print('<p>{}</p>'.format(customer))\nprint('</body>')\nprint('</html>')\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
566 |
ea1d62c4a8c406dde9bb138ee045be5e682fdbfe
|
class Wspak:
"""Iterator zwracający wartości w odwróconym porządku"""
def __init__(self, data):
self.data = data
self.index = -2
self.i=len(data)-1
def __iter__(self):
return self
def __next__(self):
if self.index >= self.i:
raise StopIteration
self.index = self.index+2
return self.data[self.index]
d=(["sdasda","sdasdasd","sdsad232","dasda","dsada"])
g=(2,3,4,6,7)
d = [x for x in Wspak(d)]
for x in Wspak(g):
print(x)
print(d)
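# (Added note: Wspak actually steps forward two indices at a time rather than
#  reversing, so the loop above prints 2, 4, 7 and d becomes
#  ['sdasda', 'sdsad232', 'dsada'].)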
|
[
"class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i=len(data)-1\n\n def __iter__(self):\n return self\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index+2\n return self.data[self.index]\nd=([\"sdasda\",\"sdasdasd\",\"sdsad232\",\"dasda\",\"dsada\"])\ng=(2,3,4,6,7)\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)",
"class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\nd = ['sdasda', 'sdasdasd', 'sdsad232', 'dasda', 'dsada']\ng = 2, 3, 4, 6, 7\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<assignment token>\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<assignment token>\n<code token>\n",
"class Wspak:\n <docstring token>\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<assignment token>\n<code token>\n",
"class Wspak:\n <docstring token>\n <function token>\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<assignment token>\n<code token>\n",
"class Wspak:\n <docstring token>\n <function token>\n\n def __iter__(self):\n return self\n <function token>\n\n\n<assignment token>\n<code token>\n",
"class Wspak:\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<class token>\n<assignment token>\n<code token>\n"
] | false |
567 |
ec64ddd01034debadb6674e71125f673f5de8367
|
from redis3barScore import StudyThreeBarsScore
from redisUtil import RedisTimeFrame
def test_score1() -> None:
package = {'close': 13.92,
'high': 14.57,
'low': 12.45,
'open': 13.4584,
'symbol': 'FANG',
'timestamp': 1627493640000000000,
'trade_count': 602,
'volume': 213907,
'vwap': 8.510506}
app = StudyThreeBarsScore()
newPrice = 13.70
realtime = []
symbol = "FANG"
stack = {'symbol': symbol, 'value': {
'firstPrice': 13.50,
'secondPrice': 14.00,
'thirdPrice': 13.00,
'timeframe': RedisTimeFrame.MIN2
}}
score1 = app.ThreeBarPlay(13.60, [], stack)
assert score1 == 4
score2 = app.ThreeBarPlay(13.40, [], stack)
assert score2 == 2
|
[
"from redis3barScore import StudyThreeBarsScore\nfrom redisUtil import RedisTimeFrame\n\n\ndef test_score1() -> None:\n package = {'close': 13.92,\n 'high': 14.57,\n 'low': 12.45,\n 'open': 13.4584,\n 'symbol': 'FANG',\n 'timestamp': 1627493640000000000,\n 'trade_count': 602,\n 'volume': 213907,\n 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.70\n realtime = []\n symbol = \"FANG\"\n stack = {'symbol': symbol, 'value': {\n 'firstPrice': 13.50,\n 'secondPrice': 14.00,\n 'thirdPrice': 13.00,\n 'timeframe': RedisTimeFrame.MIN2\n }}\n score1 = app.ThreeBarPlay(13.60, [], stack)\n assert score1 == 4\n\n score2 = app.ThreeBarPlay(13.40, [], stack)\n assert score2 == 2\n",
"from redis3barScore import StudyThreeBarsScore\nfrom redisUtil import RedisTimeFrame\n\n\ndef test_score1() ->None:\n package = {'close': 13.92, 'high': 14.57, 'low': 12.45, 'open': 13.4584,\n 'symbol': 'FANG', 'timestamp': 1627493640000000000, 'trade_count': \n 602, 'volume': 213907, 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.7\n realtime = []\n symbol = 'FANG'\n stack = {'symbol': symbol, 'value': {'firstPrice': 13.5, 'secondPrice':\n 14.0, 'thirdPrice': 13.0, 'timeframe': RedisTimeFrame.MIN2}}\n score1 = app.ThreeBarPlay(13.6, [], stack)\n assert score1 == 4\n score2 = app.ThreeBarPlay(13.4, [], stack)\n assert score2 == 2\n",
"<import token>\n\n\ndef test_score1() ->None:\n package = {'close': 13.92, 'high': 14.57, 'low': 12.45, 'open': 13.4584,\n 'symbol': 'FANG', 'timestamp': 1627493640000000000, 'trade_count': \n 602, 'volume': 213907, 'vwap': 8.510506}\n app = StudyThreeBarsScore()\n newPrice = 13.7\n realtime = []\n symbol = 'FANG'\n stack = {'symbol': symbol, 'value': {'firstPrice': 13.5, 'secondPrice':\n 14.0, 'thirdPrice': 13.0, 'timeframe': RedisTimeFrame.MIN2}}\n score1 = app.ThreeBarPlay(13.6, [], stack)\n assert score1 == 4\n score2 = app.ThreeBarPlay(13.4, [], stack)\n assert score2 == 2\n",
"<import token>\n<function token>\n"
] | false |
568 |
235bb1b9d4c41c12d7667a6bac48737464c685c7
|
from colorama import init, Fore, Style
import tempConv
#============================================================================#
# TEMP CONVERSION PROGRAM: #
#============================================================================#
#-----------------------------------------------------------------------------
def menu(x):
""" Takes a list as argument and displays as a menu """
for i in range(len(x)):
print("{0:>4s} {1:<3s}{2:^5s}{3:<15}"
.format(str(i + 1) + ")", x[i][1], "-->", x[i][0]))
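    # (Added note: e.g. the first primary-menu entry renders as
    #  '  1) °C  --> Celsius' followed by trailing padding.)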
#-----------------------------------------------------------------------------
def primary_message():
""" A message for the main switch """
print(Fore.CYAN + "\n Select the unit you want to convert from:\n" +
Fore.RESET)
menu([
["Celsius", "\u00b0C"],
["Fahrenheit", "\u00b0F"],
["Kelvin", "\u00b0K"],
["Rankin", "\u00b0R"],
["Delisle", "\u00b0De"],
["Newton", "\u00b0N"],
["R\u00e9aumur", "\u00b0R\u00e9"],
["R\u00f8mer", "\u00b0R\u00f8"],
[Fore.RED + "Exit\n" + Fore.RESET,""]
])
#-----------------------------------------------------------------------------
def secondary_message(t, unit):
""" A message for the secondary switch """
print(Fore.CYAN + "\n Select the unit you would you like to convert " +
str(t) + "\u00b0" + unit + " into:\n" + Fore.RESET)
menu([
["Celsius", "\u00b0C"],
["Fahrenheit", "\u00b0F"],
["Kelvin", "\u00b0K"],
["Rankin", "\u00b0R"],
["Delisle", "\u00b0De"],
["Newton", "\u00b0N"],
["R\u00e9aumur", "\u00b0R\u00e9"],
["R\u00f8mer", "\u00b0R\u00f8"],
[Fore.RED + "Back\n" + Fore.RESET,""]
])
#-----------------------------------------------------------------------------
def result_message(t, t2, unit, unit2):
    """ Prints the result to the screen """
    from os import system
print(Fore.GREEN + "\n " + str(round(t, 2)) + "\u00b0" + unit +
Fore.YELLOW +" --> " + Fore.GREEN + Style.BRIGHT +
str(round(t2, 2)) + "\u00b0" + unit2 + "\n" + Style.RESET_ALL)
print(system('pause'))
#-----------------------------------------------------------------------------
def choice(x, y = 0):
""" Checks user input """
while True:
try:
choice = int(input()) # <=== Check if it's an int
if choice <= x and choice > 0 and choice != y: # <=== If choice in
return choice # range and not the same; return choice
break
elif choice == y:
print(Fore.RED + "\n Can't convert to the same unit!\n" +
Fore.RESET)
else:
print(Fore.RED + "\n Invalid choice!\n" + Fore.RESET)
except ValueError: # <=== If choice is invalid prompt message
print(Fore.RED + "\n Invalid input!\n" + Fore.RESET)
#-----------------------------------------------------------------------------
def value_input(unit):
""" Asks user for temp. value, then checks it. """
print(Fore.CYAN + "\n Enter the temperature in \u00b0" + unit + ":\n" +
Fore.RESET)
while True:
try:
value = float(input()) # <=== Make sure input is a float
return value
break
except ValueError:
print(Fore.RED + "\n Input must be an integer!\n" + Fore.RESET)
#-----------------------------------------------------------------------------
def value_check(unit, value):
""" Check for value below absolute zero """
while True:
try: # <=== Checks that value isn't below abs 0
t = value_input(unit) # Returns value if okay
if value(t) != None:
return t
break
except ValueError:
tempConv(t)
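            # note: tempConv here is the imported module, so this call raises
            # TypeError; the intent is presumably to report the invalid value
            # and prompt again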
#-----------------------------------------------------------------------------
def main():
"""" This is the main function """
while True:
primary_message() # <=== Display menu and take input
x = choice(9)
z = tempConv
if x == 1:
# This is the From Celsius options
t = value_check("C", tempConv.cel_ran)
secondary_message(t, "C")
y = choice(9, 1)
while True:
if y == 2:
t2 = z.cel_fah(t) # <=== Fahrenheit
result_message(t, t2, "C", "F")
break
elif y == 3:
t2 = z.cel_kel(t) # <=== Kelvin
result_message(t, t2, "C", "K")
break
elif y == 4:
t2 = z.cel_ran(t) # <=== Rankin
result_message(t, t2, "C", "R")
break
elif y == 5:
t2 = z.cel_del(t) # <=== Delisle
result_message(t, t2, "C", "De")
break
elif y == 6:
t2 = z.cel_new(t) # <=== Newton
result_message(t, t2, "C", "N")
break
elif y == 7:
t2 = z.cel_rea(t) # <=== Reaumur
result_message(t, t2, "C", "R\u00e9")
break
elif y == 8:
t2 = z.cel_rom(t) # <=== Romer
result_message(t, t2, "C", "R\u00f8")
break
elif y == 9:
break
elif x == 2:
t = value_check("F", tempConv.fah_ran)
secondary_message(t, "F")
y = choice(9, 2)
while True:
if y == 1:
t2 = z.fah_cel(t)
result_message(t, t2, "F", "C")
break
elif y == 3:
t2 = z.fah_kel(t)
result_message(t, t2, "F", "K")
break
elif y == 4:
t2 = z.fah_ran(t)
result_message(t, t2, "F", "R")
break
elif y == 5:
t2 = z.fah_del(t)
result_message(t, t2, "F", "De")
break
elif y == 6:
t2 = z.fah_new(t)
result_message(t, t2, "F", "N")
break
elif y == 7:
t2 = z.fah_rea(t)
result_message(t, t2, "F", "R\u00e9")
break
elif y == 8:
t2 = z.fah_rom(t)
result_message(t, t2, "F", "R\u00f8")
break
elif y == 9:
break
elif x == 3:
t = value_check("K", tempConv.kel_ran)
secondary_message(t, "K")
y = choice(9, 3)
while True:
if y == 1:
t2 = z.kel_cel(t)
result_message(t, t2, "K", "C")
break
elif y == 2:
t2 = z.kel_fah(t)
result_message(t, t2, "K", "F")
break
elif y == 4:
t2 = z.kel_ran(t)
result_message(t, t2, "K", "R")
break
elif y == 5:
t2 = z.kel_del(t)
result_message(t, t2, "K", "De")
break
elif y == 6:
t2 = z.kel_new(t)
result_message(t, t2, "K", "N")
break
elif y == 7:
t2 = z.kel_rea(t)
result_message(t, t2, "K", "R\u00e9")
break
elif y == 8:
t2 = z.kel_rom(t)
result_message(t, t2, "K", "R\u00f8")
break
elif y == 9:
break
elif x == 4:
t = value_check("R", tempConv.ran_rea)
secondary_message(t, "R")
y = choice(9, 4)
while True:
if y == 1:
t2 = z.ran_cel(t)
result_message(t, t2, "R", "C")
break
elif y == 2:
t2 = z.ran_fah(t)
result_message(t, t2, "R", "F")
break
elif y == 3:
t2 = z.ran_kel(t)
result_message(t, t2, "R", "K")
break
elif y == 5:
t2 = z.ran_del(t)
result_message(t, t2, "R", "De")
break
elif y == 6:
t2 = z.ran_new(t)
result_message(t, t2, "R", "N")
break
elif y == 7:
t2 = z.ran_rea(t)
result_message(t, t2, "R", "R\u00e9")
break
elif y == 8:
t2 = z.ran_rom(t)
result_message(t, t2, "R", "R\u00f8")
break
elif y == 9:
break
elif x == 5:
t = value_check("De", tempConv.del_ran)
secondary_message(t, "De")
y = choice(9, 5)
while True:
if y == 1:
t2 = z.del_cel(t)
result_message(t, t2, "De", "C")
break
elif y == 2:
t2 = z.del_fah(t)
result_message(t, t2, "De", "F")
break
elif y == 3:
t2 = z.del_kel(t)
result_message(t, t2, "De", "K")
break
elif y == 4:
t2 = z.del_ran(t)
result_message(t, t2, "De", "R")
break
elif y == 6:
t2 = z.del_new(t)
result_message(t, t2, "De", "N")
break
elif y == 7:
t2 = z.del_rea(t)
result_message(t, t2, "De", "R\u00e9")
break
elif y == 8:
t2 = z.del_rom(t)
result_message(t, t2, "De", "R\u00f8")
break
elif y == 9:
break
elif x == 6:
t = value_check("N", tempConv.new_ran)
secondary_message(t, "N")
y = choice(9, 6)
while True:
if y == 1:
t2 = z.new_cel(t)
result_message(t, t2, "N", "C")
break
elif y == 2:
t2 = z.new_fah(t)
result_message(t, t2, "N", "F")
break
elif y == 3:
t2 = z.new_kel(t)
result_message(t, t2, "N", "K")
break
elif y == 4:
t2 = z.new_ran(t)
result_message(t, t2, "N", "R")
break
elif y == 5:
t2 = z.new_del(t)
result_message(t, t2, "N", "N")
break
elif y == 7:
t2 = z.new_rea(t)
result_message(t, t2, "N", "R\u00e9")
break
elif y == 8:
t2 = z.new_rom(t)
result_message(t, t2, "N", "R\u00f8")
break
elif y == 9:
break
elif x == 7:
t = value_check("R\u00e9", tempConv.rea_ran)
secondary_message(t, "R\u00e9")
y = choice(9, 7)
while True:
if y == 1:
t2 = z.rea_cel(t)
result_message(t, t2, "R\u00e9", "C")
break
elif y == 2:
t2 = z.rea_fah(t)
result_message(t, t2, "R\u00e9", "F")
break
elif y == 3:
t2 = z.rea_kel(t)
result_message(t, t2, "R\u00e9", "K")
break
elif y == 4:
t2 = z.rea_ran(t)
result_message(t, t2, "R\u00e9", "R")
break
elif y == 5:
t2 = z.rea_del(t)
result_message(t, t2, "R\u00e9", "De")
break
elif y == 6:
t2 = z.rea_new(t)
result_message(t, t2, "R\u00e9", "N")
break
elif y == 8:
t2 = z.rea_rom(t)
result_message(t, t2, "R\u00e9", "R\u00f8")
break
elif y == 9:
break
elif x == 8:
t = value_check("R\u00f8", tempConv.rom_ran)
secondary_message(t, "R\u00f8")
y = choice(9, 8)
while True:
if y == 1:
t2 = z.rom_cel(t)
result_message(t, t2, "R\u00f8", "C")
break
elif y == 2:
t2 = z.rom_fah(t)
result_message(t, t2, "R\u00f8", "F")
break
elif y == 3:
t2 = z.rom_kel(t)
result_message(t, t2, "R\u00f8", "K")
break
elif y == 4:
t2 = z.rom_ran(t)
result_message(t, t2, "R\u00f8", "R")
break
elif y == 5:
t2 = z.rom_del(t)
result_message(t, t2, "R\u00f8", "De")
break
elif y == 6:
t2 = z.rom_new(t)
result_message(t, t2, "R\u00f8", "N")
break
elif y == 7:
t2 = z.rom_rea(t)
result_message(t, t2, "R\u00f8", "R\u00e9")
break
elif y == 9:
break
elif x == 9:
print(Fore.CYAN + "\n Goodbye!" + Fore.RESET)
i = 0
break
#-----------------------------------------------------------------------------
if __name__ == "__main__":
init()
main()
|
[
"\nfrom colorama import init, Fore, Style\nimport tempConv\n\n#============================================================================#\n# TEMP CONVERSION PROGRAM: #\n#============================================================================#\n\n#-----------------------------------------------------------------------------\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print(\"{0:>4s} {1:<3s}{2:^5s}{3:<15}\"\n .format(str(i + 1) + \")\", x[i][1], \"-->\", x[i][0]))\n\n#-----------------------------------------------------------------------------\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + \"\\n Select the unit you want to convert from:\\n\" +\n Fore.RESET)\n menu([\n [\"Celsius\", \"\\u00b0C\"],\n [\"Fahrenheit\", \"\\u00b0F\"],\n [\"Kelvin\", \"\\u00b0K\"],\n [\"Rankin\", \"\\u00b0R\"],\n [\"Delisle\", \"\\u00b0De\"],\n [\"Newton\", \"\\u00b0N\"],\n [\"R\\u00e9aumur\", \"\\u00b0R\\u00e9\"],\n [\"R\\u00f8mer\", \"\\u00b0R\\u00f8\"],\n [Fore.RED + \"Exit\\n\" + Fore.RESET,\"\"]\n ])\n\n#-----------------------------------------------------------------------------\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + \"\\n Select the unit you would you like to convert \" +\n str(t) + \"\\u00b0\" + unit + \" into:\\n\" + Fore.RESET)\n menu([\n [\"Celsius\", \"\\u00b0C\"],\n [\"Fahrenheit\", \"\\u00b0F\"],\n [\"Kelvin\", \"\\u00b0K\"],\n [\"Rankin\", \"\\u00b0R\"],\n [\"Delisle\", \"\\u00b0De\"],\n [\"Newton\", \"\\u00b0N\"],\n [\"R\\u00e9aumur\", \"\\u00b0R\\u00e9\"],\n [\"R\\u00f8mer\", \"\\u00b0R\\u00f8\"],\n [Fore.RED + \"Back\\n\" + Fore.RESET,\"\"]\n ])\n\n#-----------------------------------------------------------------------------\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + \"\\n \" + str(round(t, 2)) + \"\\u00b0\" + unit +\n Fore.YELLOW +\" --> \" + Fore.GREEN + Style.BRIGHT +\n str(round(t2, 2)) + \"\\u00b0\" + unit2 + \"\\n\" + Style.RESET_ALL)\n print(system('pause'))\n#-----------------------------------------------------------------------------\n\ndef choice(x, y = 0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input()) # <=== Check if it's an int\n if choice <= x and choice > 0 and choice != y: # <=== If choice in\n return choice # range and not the same; return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + \"\\n Invalid choice!\\n\" + Fore.RESET)\n except ValueError: # <=== If choice is invalid prompt message\n print(Fore.RED + \"\\n Invalid input!\\n\" + Fore.RESET)\n\n#-----------------------------------------------------------------------------\n\ndef value_input(unit):\n \"\"\" Asks user for temp. value, then checks it. 
\"\"\"\n print(Fore.CYAN + \"\\n Enter the temperature in \\u00b0\" + unit + \":\\n\" +\n Fore.RESET)\n while True:\n try:\n value = float(input()) # <=== Make sure input is a float\n return value\n break\n except ValueError:\n print(Fore.RED + \"\\n Input must be an integer!\\n\" + Fore.RESET)\n\n#-----------------------------------------------------------------------------\n\ndef value_check(unit, value):\n \"\"\" Check for value below absolute zero \"\"\"\n while True:\n try: # <=== Checks that value isn't below abs 0\n t = value_input(unit) # Returns value if okay\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)\n\n#-----------------------------------------------------------------------------\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message() # <=== Display menu and take input\n x = choice(9)\n z = tempConv\n\n if x == 1:\n # This is the From Celsius options\n t = value_check(\"C\", tempConv.cel_ran)\n secondary_message(t, \"C\")\n y = choice(9, 1)\n\n while True:\n if y == 2:\n t2 = z.cel_fah(t) # <=== Fahrenheit\n result_message(t, t2, \"C\", \"F\")\n break\n elif y == 3:\n t2 = z.cel_kel(t) # <=== Kelvin\n result_message(t, t2, \"C\", \"K\")\n break\n elif y == 4:\n t2 = z.cel_ran(t) # <=== Rankin\n result_message(t, t2, \"C\", \"R\")\n break\n elif y == 5:\n t2 = z.cel_del(t) # <=== Delisle\n result_message(t, t2, \"C\", \"De\")\n break\n elif y == 6:\n t2 = z.cel_new(t) # <=== Newton\n result_message(t, t2, \"C\", \"N\")\n break\n elif y == 7:\n t2 = z.cel_rea(t) # <=== Reaumur\n result_message(t, t2, \"C\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.cel_rom(t) # <=== Romer\n result_message(t, t2, \"C\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 2:\n t = value_check(\"F\", tempConv.fah_ran)\n secondary_message(t, \"F\")\n y = choice(9, 2)\n\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, \"F\", \"C\")\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, \"F\", \"K\")\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, \"F\", \"R\")\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, \"F\", \"De\")\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, \"F\", \"N\")\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, \"F\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, \"F\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 3:\n t = value_check(\"K\", tempConv.kel_ran)\n secondary_message(t, \"K\")\n y = choice(9, 3)\n\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, \"K\", \"C\")\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, \"K\", \"F\")\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, \"K\", \"R\")\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, \"K\", \"De\")\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, \"K\", \"N\")\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, \"K\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, \"K\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 4:\n t = value_check(\"R\", tempConv.ran_rea)\n secondary_message(t, \"R\")\n y = choice(9, 4)\n\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, \"R\", \"C\")\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, \"R\", \"F\")\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, 
t2, \"R\", \"K\")\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, \"R\", \"De\")\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, \"R\", \"N\")\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, \"R\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, \"R\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 5:\n t = value_check(\"De\", tempConv.del_ran)\n secondary_message(t, \"De\")\n y = choice(9, 5)\n\n while True:\n\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, \"De\", \"C\")\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, \"De\", \"F\")\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, \"De\", \"K\")\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, \"De\", \"R\")\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, \"De\", \"N\")\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, \"De\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, \"De\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 6:\n t = value_check(\"N\", tempConv.new_ran)\n secondary_message(t, \"N\")\n y = choice(9, 6)\n\n while True:\n\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, \"N\", \"C\")\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, \"N\", \"F\")\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, \"N\", \"K\")\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, \"N\", \"R\")\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, \"N\", \"N\")\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, \"N\", \"R\\u00e9\")\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, \"N\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 7:\n t = value_check(\"R\\u00e9\", tempConv.rea_ran)\n secondary_message(t, \"R\\u00e9\")\n y = choice(9, 7)\n\n while True:\n\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, \"R\\u00e9\", \"C\")\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, \"R\\u00e9\", \"F\")\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, \"R\\u00e9\", \"K\")\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, \"R\\u00e9\", \"R\")\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, \"R\\u00e9\", \"De\")\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, \"R\\u00e9\", \"N\")\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, \"R\\u00e9\", \"R\\u00f8\")\n break\n elif y == 9:\n break\n\n elif x == 8:\n t = value_check(\"R\\u00f8\", tempConv.rom_ran)\n secondary_message(t, \"R\\u00f8\")\n y = choice(9, 8)\n\n while True:\n\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, \"R\\u00f8\", \"C\")\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, \"R\\u00f8\", \"F\")\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, \"R\\u00f8\", \"K\")\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, \"R\\u00f8\", \"R\")\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, \"R\\u00f8\", \"De\")\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, \"R\\u00f8\", \"N\")\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, \"R\\u00f8\", \"R\\u00e9\")\n break\n elif y == 9:\n break\n\n elif x == 9:\n print(Fore.CYAN + \"\\n Goodbye!\" + Fore.RESET)\n i = 0\n 
break\n\n#-----------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n init()\n main()\n",
"from colorama import init, Fore, Style\nimport tempConv\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + '\\n ' + str(round(t, 2)) + '°' + unit + Fore.YELLOW +\n ' --> ' + Fore.GREEN + Style.BRIGHT + str(round(t2, 2)) + '°' +\n unit2 + '\\n' + Style.RESET_ALL)\n print(system('pause'))\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\ndef value_input(unit):\n \"\"\" Asks user for temp. value, then checks it. 
\"\"\"\n print(Fore.CYAN + '\\n Enter the temperature in °' + unit + ':\\n' + Fore\n .RESET)\n while True:\n try:\n value = float(input())\n return value\n break\n except ValueError:\n print(Fore.RED + '\\n Input must be an integer!\\n' + Fore.RESET)\n\n\ndef value_check(unit, value):\n \"\"\" Check for value below absolute zero \"\"\"\n while True:\n try:\n t = value_input(unit)\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, 
t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\nif __name__ == '__main__':\n init()\n main()\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + '\\n ' + str(round(t, 2)) + '°' + unit + Fore.YELLOW +\n ' --> ' + Fore.GREEN + Style.BRIGHT + str(round(t2, 2)) + '°' +\n unit2 + '\\n' + Style.RESET_ALL)\n print(system('pause'))\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\ndef value_input(unit):\n \"\"\" Asks user for temp. value, then checks it. 
\"\"\"\n print(Fore.CYAN + '\\n Enter the temperature in °' + unit + ':\\n' + Fore\n .RESET)\n while True:\n try:\n value = float(input())\n return value\n break\n except ValueError:\n print(Fore.RED + '\\n Input must be an integer!\\n' + Fore.RESET)\n\n\ndef value_check(unit, value):\n \"\"\" Check for value below absolute zero \"\"\"\n while True:\n try:\n t = value_input(unit)\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, 
t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\nif __name__ == '__main__':\n init()\n main()\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + '\\n ' + str(round(t, 2)) + '°' + unit + Fore.YELLOW +\n ' --> ' + Fore.GREEN + Style.BRIGHT + str(round(t2, 2)) + '°' +\n unit2 + '\\n' + Style.RESET_ALL)\n print(system('pause'))\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\ndef value_input(unit):\n \"\"\" Asks user for temp. value, then checks it. 
\"\"\"\n print(Fore.CYAN + '\\n Enter the temperature in °' + unit + ':\\n' + Fore\n .RESET)\n while True:\n try:\n value = float(input())\n return value\n break\n except ValueError:\n print(Fore.RED + '\\n Input must be an integer!\\n' + Fore.RESET)\n\n\ndef value_check(unit, value):\n \"\"\" Check for value below absolute zero \"\"\"\n while True:\n try:\n t = value_input(unit)\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, 
t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + '\\n ' + str(round(t, 2)) + '°' + unit + Fore.YELLOW +\n ' --> ' + Fore.GREEN + Style.BRIGHT + str(round(t2, 2)) + '°' +\n unit2 + '\\n' + Style.RESET_ALL)\n print(system('pause'))\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\n<function token>\n\n\ndef value_check(unit, value):\n \"\"\" Check for value below absolute zero \"\"\"\n while True:\n try:\n t = value_input(unit)\n if value(t) != None:\n return t\n break\n except ValueError:\n tempConv(t)\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = 
choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = 
z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\ndef result_message(t, t2, unit, unit2):\n from os import system\n \"\"\" Prints the result to the screen \"\"\"\n print(Fore.GREEN + '\\n ' + str(round(t, 2)) + '°' + unit + Fore.YELLOW +\n ' --> ' + Fore.GREEN + Style.BRIGHT + str(round(t2, 2)) + '°' +\n unit2 + '\\n' + Style.RESET_ALL)\n print(system('pause'))\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 
4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 
'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\n<function token>\n\n\ndef choice(x, y=0):\n \"\"\" Checks user input \"\"\"\n while True:\n try:\n choice = int(input())\n if choice <= x and choice > 0 and choice != y:\n return choice\n break\n elif choice == y:\n print(Fore.RED + \"\\n Can't convert to the same unit!\\n\" +\n Fore.RESET)\n else:\n print(Fore.RED + '\\n Invalid choice!\\n' + Fore.RESET)\n except ValueError:\n print(Fore.RED + '\\n Invalid input!\\n' + Fore.RESET)\n\n\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n 
elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN 
+ '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\ndef primary_message():\n \"\"\" A message for the main switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you want to convert from:\\n' +\n Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Exit\\n' + Fore.RESET, '']])\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n 
result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\n<function token>\n\n\ndef secondary_message(t, unit):\n \"\"\" A message for the secondary switch \"\"\"\n print(Fore.CYAN + '\\n Select the unit you would you like to convert ' +\n str(t) + '°' + unit + ' into:\\n' + Fore.RESET)\n menu([['Celsius', '°C'], ['Fahrenheit', '°F'], ['Kelvin', '°K'], [\n 'Rankin', '°R'], ['Delisle', '°De'], ['Newton', '°N'], ['Réaumur',\n '°Ré'], ['Rømer', '°Rø'], [Fore.RED + 'Back\\n' + Fore.RESET, '']])\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n 
break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n\n\ndef menu(x):\n \"\"\" Takes a list as argument and displays as a menu \"\"\"\n for i in range(len(x)):\n print('{0:>4s} {1:<3s}{2:^5s}{3:<15}'.format(str(i + 1) + ')', x[i]\n [1], '-->', x[i][0]))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = 
z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef main():\n \"\"\"\" This is the main function \"\"\"\n while True:\n primary_message()\n x = choice(9)\n z = tempConv\n if x == 1:\n t = value_check('C', tempConv.cel_ran)\n secondary_message(t, 'C')\n y = choice(9, 1)\n while True:\n if y == 2:\n t2 = z.cel_fah(t)\n result_message(t, t2, 'C', 'F')\n break\n elif y == 3:\n t2 = z.cel_kel(t)\n result_message(t, t2, 'C', 'K')\n break\n elif y == 4:\n t2 = z.cel_ran(t)\n result_message(t, t2, 'C', 'R')\n break\n elif y == 5:\n t2 = z.cel_del(t)\n result_message(t, t2, 'C', 'De')\n break\n elif y == 6:\n t2 = z.cel_new(t)\n result_message(t, t2, 'C', 'N')\n break\n elif y == 7:\n t2 = z.cel_rea(t)\n result_message(t, t2, 'C', 'Ré')\n break\n elif y == 8:\n t2 = z.cel_rom(t)\n result_message(t, t2, 'C', 'Rø')\n break\n elif y == 9:\n break\n elif x == 2:\n t = value_check('F', tempConv.fah_ran)\n secondary_message(t, 'F')\n y = choice(9, 2)\n while True:\n if y == 1:\n t2 = z.fah_cel(t)\n result_message(t, t2, 'F', 'C')\n break\n elif y == 3:\n t2 = z.fah_kel(t)\n result_message(t, t2, 'F', 'K')\n break\n elif y == 4:\n t2 = z.fah_ran(t)\n result_message(t, t2, 'F', 'R')\n break\n elif y == 5:\n t2 = z.fah_del(t)\n result_message(t, t2, 'F', 'De')\n break\n elif y == 6:\n t2 = z.fah_new(t)\n result_message(t, t2, 'F', 'N')\n break\n elif y == 7:\n t2 = z.fah_rea(t)\n result_message(t, t2, 'F', 'Ré')\n break\n elif y == 8:\n t2 = z.fah_rom(t)\n result_message(t, t2, 'F', 'Rø')\n break\n elif y == 9:\n break\n elif x == 3:\n t = value_check('K', tempConv.kel_ran)\n secondary_message(t, 'K')\n y = choice(9, 3)\n while True:\n if y == 1:\n t2 = z.kel_cel(t)\n result_message(t, t2, 'K', 'C')\n break\n elif y == 2:\n t2 = z.kel_fah(t)\n result_message(t, t2, 'K', 'F')\n break\n elif y == 4:\n t2 = z.kel_ran(t)\n result_message(t, t2, 'K', 'R')\n break\n elif y == 5:\n t2 = z.kel_del(t)\n result_message(t, t2, 'K', 'De')\n break\n elif y == 6:\n t2 = z.kel_new(t)\n result_message(t, t2, 'K', 'N')\n break\n elif y == 7:\n t2 = z.kel_rea(t)\n result_message(t, t2, 'K', 'Ré')\n break\n elif y == 8:\n t2 = z.kel_rom(t)\n result_message(t, t2, 'K', 'Rø')\n break\n elif y == 9:\n break\n elif x == 4:\n t = value_check('R', tempConv.ran_rea)\n secondary_message(t, 'R')\n y = choice(9, 4)\n while True:\n if y == 1:\n t2 = z.ran_cel(t)\n result_message(t, t2, 'R', 'C')\n break\n elif y == 2:\n t2 = z.ran_fah(t)\n result_message(t, t2, 'R', 'F')\n break\n elif y == 3:\n t2 = z.ran_kel(t)\n result_message(t, t2, 'R', 'K')\n break\n elif y == 5:\n t2 = z.ran_del(t)\n result_message(t, t2, 'R', 'De')\n break\n elif y == 6:\n t2 = z.ran_new(t)\n result_message(t, t2, 'R', 'N')\n break\n elif y == 7:\n t2 = z.ran_rea(t)\n result_message(t, t2, 'R', 'Ré')\n break\n elif y == 8:\n t2 = z.ran_rom(t)\n result_message(t, t2, 'R', 'Rø')\n break\n elif y == 9:\n break\n elif x == 5:\n t = value_check('De', tempConv.del_ran)\n secondary_message(t, 'De')\n y = choice(9, 5)\n while True:\n if y == 1:\n t2 = z.del_cel(t)\n result_message(t, t2, 'De', 'C')\n break\n elif y == 2:\n t2 = z.del_fah(t)\n result_message(t, t2, 'De', 'F')\n break\n elif y == 3:\n t2 = z.del_kel(t)\n result_message(t, t2, 'De', 'K')\n break\n elif y == 4:\n t2 = z.del_ran(t)\n result_message(t, t2, 'De', 'R')\n break\n elif y == 6:\n t2 = z.del_new(t)\n result_message(t, t2, 'De', 'N')\n break\n elif y == 7:\n t2 = z.del_rea(t)\n result_message(t, t2, 
'De', 'Ré')\n break\n elif y == 8:\n t2 = z.del_rom(t)\n result_message(t, t2, 'De', 'Rø')\n break\n elif y == 9:\n break\n elif x == 6:\n t = value_check('N', tempConv.new_ran)\n secondary_message(t, 'N')\n y = choice(9, 6)\n while True:\n if y == 1:\n t2 = z.new_cel(t)\n result_message(t, t2, 'N', 'C')\n break\n elif y == 2:\n t2 = z.new_fah(t)\n result_message(t, t2, 'N', 'F')\n break\n elif y == 3:\n t2 = z.new_kel(t)\n result_message(t, t2, 'N', 'K')\n break\n elif y == 4:\n t2 = z.new_ran(t)\n result_message(t, t2, 'N', 'R')\n break\n elif y == 5:\n t2 = z.new_del(t)\n result_message(t, t2, 'N', 'N')\n break\n elif y == 7:\n t2 = z.new_rea(t)\n result_message(t, t2, 'N', 'Ré')\n break\n elif y == 8:\n t2 = z.new_rom(t)\n result_message(t, t2, 'N', 'Rø')\n break\n elif y == 9:\n break\n elif x == 7:\n t = value_check('Ré', tempConv.rea_ran)\n secondary_message(t, 'Ré')\n y = choice(9, 7)\n while True:\n if y == 1:\n t2 = z.rea_cel(t)\n result_message(t, t2, 'Ré', 'C')\n break\n elif y == 2:\n t2 = z.rea_fah(t)\n result_message(t, t2, 'Ré', 'F')\n break\n elif y == 3:\n t2 = z.rea_kel(t)\n result_message(t, t2, 'Ré', 'K')\n break\n elif y == 4:\n t2 = z.rea_ran(t)\n result_message(t, t2, 'Ré', 'R')\n break\n elif y == 5:\n t2 = z.rea_del(t)\n result_message(t, t2, 'Ré', 'De')\n break\n elif y == 6:\n t2 = z.rea_new(t)\n result_message(t, t2, 'Ré', 'N')\n break\n elif y == 8:\n t2 = z.rea_rom(t)\n result_message(t, t2, 'Ré', 'Rø')\n break\n elif y == 9:\n break\n elif x == 8:\n t = value_check('Rø', tempConv.rom_ran)\n secondary_message(t, 'Rø')\n y = choice(9, 8)\n while True:\n if y == 1:\n t2 = z.rom_cel(t)\n result_message(t, t2, 'Rø', 'C')\n break\n elif y == 2:\n t2 = z.rom_fah(t)\n result_message(t, t2, 'Rø', 'F')\n break\n elif y == 3:\n t2 = z.rom_kel(t)\n result_message(t, t2, 'Rø', 'K')\n break\n elif y == 4:\n t2 = z.rom_ran(t)\n result_message(t, t2, 'Rø', 'R')\n break\n elif y == 5:\n t2 = z.rom_del(t)\n result_message(t, t2, 'Rø', 'De')\n break\n elif y == 6:\n t2 = z.rom_new(t)\n result_message(t, t2, 'Rø', 'N')\n break\n elif y == 7:\n t2 = z.rom_rea(t)\n result_message(t, t2, 'Rø', 'Ré')\n break\n elif y == 9:\n break\n elif x == 9:\n print(Fore.CYAN + '\\n Goodbye!' + Fore.RESET)\n i = 0\n break\n\n\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
569 |
d9f586bbb72021ee0b37ff8660e26b50d7e6a2d3
|
from django.http import HttpResponse
from django.shortcuts import render
def index(request):
return render(request, 'ALR1.html')
def search(request):
return render(request, 'ALR2.html')
def home(request):
return render(request, 'ALR3.html')
def pdf(request):
pdfId = request.GET['id']
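    # 'id' is read from the query string but not used below: the per-id line is
    # commented out and a fixed test PDF is served instead.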
# pdf_data=open('pdf/' + pdfId + '.pdf','rb').read()
    pdf_data = open('pdf/test.pdf', 'rb').read()
return HttpResponse(pdf_data, content_type='application/pdf')
|
[
"from django.http import HttpResponse\nfrom django.shortcuts import render\ndef index(request):\n return render(request, 'ALR1.html')\ndef search(request):\n return render(request, 'ALR2.html')\ndef home(request):\n return render(request, 'ALR3.html')\ndef pdf(request):\n pdfId = request.GET['id']\n # pdf_data=open('pdf/' + pdfId + '.pdf','rb').read()\n pdf_data=open('pdf/test.pdf','rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"from django.http import HttpResponse\nfrom django.shortcuts import render\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\ndef pdf(request):\n pdfId = request.GET['id']\n pdf_data = open('pdf/test.pdf', 'rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\ndef pdf(request):\n pdfId = request.GET['id']\n pdf_data = open('pdf/test.pdf', 'rb').read()\n return HttpResponse(pdf_data, content_type='application/pdf')\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\ndef search(request):\n return render(request, 'ALR2.html')\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\n<function token>\n",
"<import token>\n\n\ndef index(request):\n return render(request, 'ALR1.html')\n\n\n<function token>\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef home(request):\n return render(request, 'ALR3.html')\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
570 |
3d737d0ee9c3af1f8ebe4c6998ad30fa34f42856
|
from django.shortcuts import render
from ..login.models import *
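# Both views expect 'user_id' to have been stored in the session by the login app;
# a missing key raises KeyError and an unknown id raises User.DoesNotExist.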
def user(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/user.html', context)
def admin(request):
context = {
"users" : User.objects.all(),
"user_level" : User.objects.get(id = request.session['user_id'])
}
return render(request, 'dashboard/admin.html', context)
|
[
"from django.shortcuts import render\nfrom .. login.models import *\n\ndef user(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/user.html', context)\n\ndef admin(request):\n context = {\n \"users\" : User.objects.all(),\n \"user_level\" : User.objects.get(id = request.session['user_id'])\n }\n return render(request, 'dashboard/admin.html', context)\n",
"from django.shortcuts import render\nfrom ..login.models import *\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"<import token>\n\n\ndef user(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/user.html', context)\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"<import token>\n<function token>\n\n\ndef admin(request):\n context = {'users': User.objects.all(), 'user_level': User.objects.get(\n id=request.session['user_id'])}\n return render(request, 'dashboard/admin.html', context)\n",
"<import token>\n<function token>\n<function token>\n"
] | false |
571 |
937d01eaa82cbfe07b20fae9320c554a0960d7b1
|
import sys
sys.stdin = open('input.txt', 'rt')
BLOCK_0 = 1
BLOCK_1 = 2
BLOCK_2 = 3
N = int(input())
X, Y = 10, 10
# x: row index, y: column index
GRN = 0
BLU = 1
maps = [[0]*Y for _ in range(X)]
dx = [1, 0]
dy = [0, 1]
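# Board layout (as used below): a 10x10 grid whose upper-left 4x4 square is the
# shared start area; rows 4-9 / cols 0-3 form the green board and cols 4-9 /
# rows 0-3 the blue board. dx/dy give the per-colour step: green moves down
# (+row), blue moves right (+col).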
def outMaps(x, y):
global X, Y
if 0<=x<X and 0<=y<Y: return False
else: return True
def meetBlock(x, y, maps):
if maps[x][y] == 1: return True
else: return False
def onlyUpdate(n_blocks, xs, ys, maps):
for i in range(n_blocks):
maps[xs[i]][ys[i]] = 1
def oneLineFull(maps, CLR):
for i in range(4, 10):
for j in range(4):
if CLR == GRN and maps[i][j] == 0:
break
elif CLR == BLU and maps[j][i] == 0:
break
        else:  # no break: every cell is 1, so line i is completely full
return True, i
return False, 0
def pullAndUpdate(olf_idx, maps, CLR):
#for olf in list_olf:
for i in range(olf_idx, 3, -1):
for j in range(4):
if CLR == GRN:
if olf_idx == 4:
maps[i][j] = 0
else:
maps[i][j] = maps[i-1][j]
maps[i-1][j] = 0
elif CLR == BLU:
if olf_idx == 4:
maps[j][i] = 0
else:
maps[j][i] = maps[j][i-1]
maps[j][i-1] = 0
def pushAndPullUpdate(n_inBorder, maps, CLR):
for i in range(10-1-n_inBorder, 3, -1):
for j in range(4):
if CLR == GRN:
maps[i+n_inBorder][j] = maps[i][j]
maps[i][j] = 0
elif CLR == BLU:
maps[j][i+n_inBorder] = maps[j][i]
maps[j][i] = 0
def print_maps(maps):
global X, Y
for i in range(X):
for j in range(Y):
print(maps[i][j], end=' ')
print()
print()
def isBlockInBorder(maps, CLR):
cnt = 0
for i in range(4, 6):
for j in range(4):
if (CLR == GRN and maps[i][j] == 1) or (CLR == BLU and maps[j][i] == 1):
cnt += 1
break
return cnt
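# Mover slides one block as far as possible in the CLR direction, fixes it onto
# the board, clears every fully filled line (one point each) and, when blocks are
# left in the boundary area, shifts the board to push them out of it.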
def Mover(n_blocks, xs_ori, ys_ori, maps, CLR):
xs = xs_ori.copy()
ys = ys_ori.copy()
score = 0
STOP_FLAG = False
while not STOP_FLAG:
for i in range(n_blocks):
xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]
if outMaps(xt, yt):
STOP_FLAG = True
break
if meetBlock(xt, yt, maps):
STOP_FLAG = True
break
else:
            # no break was hit, so the block can advance one step; update its cells
for i in range(n_blocks):
xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]
    # the loop exited with STOP_FLAG == True, so this block has finished moving;
    # fix it onto the board, then check whether any line is completely filled
onlyUpdate(n_blocks, xs, ys, maps)
    # while there is a fully filled line, clear it and score one point
OLF_FLAG = True
while OLF_FLAG:
OLF_FLAG, olf_idx = oneLineFull(maps, CLR)
if OLF_FLAG:
score += 1
pullAndUpdate(olf_idx, maps, CLR)
    # if blocks remain inside the boundary area, shift the board to push them out
n_inBorder = isBlockInBorder(maps, CLR)
if n_inBorder:
pushAndPullUpdate(n_inBorder, maps, CLR)
return score
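# Area_score counts the occupied cells left in the green (rows 4-9) or blue
# (cols 4-9) scoring area once all blocks have been dropped.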
def Area_score(maps, CLR):
score = 0
for i in range(4, 10):
for j in range(4):
if CLR == GRN: score += maps[i][j]
elif CLR == BLU: score += maps[j][i]
return score
total_score = 0
for i in range(N):
t, x, y = map(int, input().split())
xs, ys = [x], [y]
if t == BLOCK_0:
n_blocks = 1
elif t == BLOCK_1:
n_blocks = 2
xs.append(x)
ys.append(y+1)
elif t == BLOCK_2:
n_blocks = 2
xs.append(x+1)
ys.append(y)
total_score += Mover(n_blocks, xs, ys, maps, GRN)
total_score += Mover(n_blocks, xs, ys, maps, BLU)
#print_maps(maps)
grn_score = Area_score(maps, GRN)
blu_score = Area_score(maps, BLU)
print(total_score)
print(grn_score+blu_score)
|
[
"import sys\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\n# x: 행 , y: 열A\nGRN = 0\nBLU = 1\nmaps = [[0]*Y for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\ndef outMaps(x, y):\n global X, Y\n if 0<=x<X and 0<=y<Y: return False\n else: return True\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1: return True\n else: return False\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else: # 전부 1이여서 full line일 때\n return True, i\n return False, 0\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n #for olf in list_olf:\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i-1][j]\n maps[i-1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i-1]\n maps[j][i-1] = 0\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10-1-n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i+n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i+n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if (CLR == GRN and maps[i][j] == 1) or (CLR == BLU and maps[j][i] == 1):\n cnt += 1\n break\n return cnt\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n # break 걸리지 않고 넘어왔으므로, update\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n # 만약 STOP_FLAG == True 로 탈출했다면\n # 해당 상자의 이동이 끝난 것 이므로 한 줄이 전부 차있는 것이 있는지 check\n # maps에 업데이트\n onlyUpdate(n_blocks, xs, ys, maps)\n \n # 만약 one line full 인 라인이 있다면\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n\n # 만약 경계안에 block이 존재한다면\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN: score += maps[i][j]\n elif CLR == BLU: score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y+1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x+1)\n ys.append(y)\n\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n #print_maps(maps)\n\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\n\nprint(total_score)\nprint(grn_score+blu_score)\n\n\n\n\n",
"import sys\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\nGRN = 0\nBLU = 1\nmaps = [([0] * Y) for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\nprint(total_score)\nprint(grn_score + blu_score)\n",
"<import token>\nsys.stdin = open('input.txt', 'rt')\nBLOCK_0 = 1\nBLOCK_1 = 2\nBLOCK_2 = 3\nN = int(input())\nX, Y = 10, 10\nGRN = 0\nBLU = 1\nmaps = [([0] * Y) for _ in range(X)]\ndx = [1, 0]\ndy = [0, 1]\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\ntotal_score = 0\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\ngrn_score = Area_score(maps, GRN)\nblu_score = Area_score(maps, BLU)\nprint(total_score)\nprint(grn_score + blu_score)\n",
"<import token>\n<assignment token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<assignment token>\nfor i in range(N):\n t, x, y = map(int, input().split())\n xs, ys = [x], [y]\n if t == BLOCK_0:\n n_blocks = 1\n elif t == BLOCK_1:\n n_blocks = 2\n xs.append(x)\n ys.append(y + 1)\n elif t == BLOCK_2:\n n_blocks = 2\n xs.append(x + 1)\n ys.append(y)\n total_score += Mover(n_blocks, xs, ys, maps, GRN)\n total_score += Mover(n_blocks, xs, ys, maps, BLU)\n<assignment token>\nprint(total_score)\nprint(grn_score + blu_score)\n",
"<import token>\n<assignment token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\ndef oneLineFull(maps, CLR):\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 0:\n break\n elif CLR == BLU and maps[j][i] == 0:\n break\n else:\n return True, i\n return False, 0\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\ndef Area_score(maps, CLR):\n score = 0\n for i in range(4, 10):\n for j in range(4):\n if CLR == GRN:\n score += maps[i][j]\n elif CLR == BLU:\n score += maps[j][i]\n return score\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef outMaps(x, y):\n global X, Y\n if 0 <= x < X and 0 <= y < Y:\n return False\n else:\n return True\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\ndef isBlockInBorder(maps, CLR):\n cnt = 0\n for i in range(4, 6):\n for j in range(4):\n if CLR == GRN and maps[i][j] == 1 or CLR == BLU and maps[j][i\n ] == 1:\n cnt += 1\n break\n return cnt\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef meetBlock(x, y, maps):\n if maps[x][y] == 1:\n return True\n else:\n return False\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\n<function token>\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n\n\ndef pullAndUpdate(olf_idx, maps, CLR):\n for i in range(olf_idx, 3, -1):\n for j in range(4):\n if CLR == GRN:\n if olf_idx == 4:\n maps[i][j] = 0\n else:\n maps[i][j] = maps[i - 1][j]\n maps[i - 1][j] = 0\n elif CLR == BLU:\n if olf_idx == 4:\n maps[j][i] = 0\n else:\n maps[j][i] = maps[j][i - 1]\n maps[j][i - 1] = 0\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\n<function token>\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef onlyUpdate(n_blocks, xs, ys, maps):\n for i in range(n_blocks):\n maps[xs[i]][ys[i]] = 1\n\n\n<function token>\n<function token>\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\n<function token>\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\n<function token>\n\n\ndef Mover(n_blocks, xs_ori, ys_ori, maps, CLR):\n xs = xs_ori.copy()\n ys = ys_ori.copy()\n score = 0\n STOP_FLAG = False\n while not STOP_FLAG:\n for i in range(n_blocks):\n xt, yt = xs[i] + dx[CLR], ys[i] + dy[CLR]\n if outMaps(xt, yt):\n STOP_FLAG = True\n break\n if meetBlock(xt, yt, maps):\n STOP_FLAG = True\n break\n else:\n for i in range(n_blocks):\n xs[i], ys[i] = xs[i] + dx[CLR], ys[i] + dy[CLR]\n onlyUpdate(n_blocks, xs, ys, maps)\n OLF_FLAG = True\n while OLF_FLAG:\n OLF_FLAG, olf_idx = oneLineFull(maps, CLR)\n if OLF_FLAG:\n score += 1\n pullAndUpdate(olf_idx, maps, CLR)\n n_inBorder = isBlockInBorder(maps, CLR)\n if n_inBorder:\n pushAndPullUpdate(n_inBorder, maps, CLR)\n return score\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\ndef print_maps(maps):\n global X, Y\n for i in range(X):\n for j in range(Y):\n print(maps[i][j], end=' ')\n print()\n print()\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef pushAndPullUpdate(n_inBorder, maps, CLR):\n for i in range(10 - 1 - n_inBorder, 3, -1):\n for j in range(4):\n if CLR == GRN:\n maps[i + n_inBorder][j] = maps[i][j]\n maps[i][j] = 0\n elif CLR == BLU:\n maps[j][i + n_inBorder] = maps[j][i]\n maps[j][i] = 0\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
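The token-degraded steps above still expose the board helpers from the original file (onlyUpdate, pullAndUpdate, pushAndPullUpdate, Mover), which clear a full line and pull the rows above it down. A minimal, dependency-free sketch of that clear-and-pull step on a plain list-of-lists grid; the 6x4 board size and the 0/1 cell encoding are assumptions for illustration, not the original constants.

# Sketch (assumed 6x4 board, 1 = occupied): clear full rows and pull the rows above down,
# mirroring the pullAndUpdate idea visible in the degraded steps above.
def clear_full_rows(board):
    rows, cols = len(board), len(board[0])
    cleared = 0
    for r in range(rows):
        if all(board[r][c] == 1 for c in range(cols)):
            cleared += 1
            for rr in range(r, 0, -1):       # shift everything above row r down by one
                board[rr] = board[rr - 1][:]
            board[0] = [0] * cols            # the top row becomes empty
    return cleared

if __name__ == '__main__':
    demo = [[0, 0, 0, 0],
            [0, 1, 0, 0],
            [1, 1, 1, 1],   # full row: should be cleared
            [0, 1, 1, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 0]]
    print(clear_full_rows(demo))  # -> 1
    for row in demo:
        print(row)
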
572 |
ebe546794131eddea396bd6b82fbb41aeead4661
|
# -*- coding: utf-8 -*-
"""
This is a simple sample for seuif.py
License: this code is in the public domain
Author: Cheng Maohua
Email: [email protected]
Last modified: 2016.4.20
"""
from seuif97 import *
import matplotlib.pyplot as plt
import numpy as np
p1,t1 = 16, 535
p2,t2 = 3.56,315
h1 = pt2h(p1, t1)
s1 = pt2s(p1, t1)
h2 = pt2h(p2, t2)
s2 = pt2s(p2, t2)
h2s = ps2h(p2, s1)
his = ishd(p1, t1, p2)
ef = ief(p1, t1, p2, t2)
# print('The isentropic efficiency is ',ef)
# 4条线:p1、p2 等压,等熵焓降线、膨胀线
samp = 0.01
smp1 = s1 - samp
hsmp1 = ps2h(p1, smp1)
sap1 = s1 + samp
hsap1 = ps2h(p1, sap1)
smt1 = s1 - samp
hsmt1 = ps2h(p1, smp1)
sat1 = s1 + samp
hsat1 = ts2h(t1, sap1)
point_p1_h = np.zeros(shape=3)
point_p1_h[0] = hsmp1
point_p1_h[1] = h1
point_p1_h[2] = hsap1
point_p1_s = np.zeros(shape=3)
point_p1_s[0] = smp1
point_p1_s[1] = s1
point_p1_s[2] = sap1
# p2
smp2 = s1 - samp # 等熵焓降点延伸
hsmp2 = ps2h(p2, smp2)
sap2 = s2 + samp
hsap2 = ps2h(p2, sap2)
smt2 = s2 - samp
hsmt2 = ps2h(p1, smp1)
sat2 = s2 + samp
hsat2 = ts2h(t2, sap1)
point_p2_h = np.zeros(shape=3)
point_p2_h[0] = hsmp2
point_p2_h[1] = h2
point_p2_h[2] = hsap2
point_p2_s = np.zeros(shape=3)
point_p2_s[0] = smp2
point_p2_s[1] = s2
point_p2_s[2] = sap2
# 等熵焓降
point_is_h = np.zeros(shape=2)
point_is_h[0] = h1
point_is_h[1] = h2s
point_is_s = np.zeros(shape=2)
point_is_s[0] = s1
point_is_s[1] = s1
# HP Expansion Line
point_hp_h = np.zeros(shape=2)
point_hp_h[0] = h1
point_hp_h[1] = h2
point_hp_s = np.zeros(shape=2)
point_hp_s[0] = s1
point_hp_s[1] = s2
plt.plot(point_p1_s, point_p1_h, 'bs-')
plt.plot(point_p2_s, point_p2_h, 'bs-')
plt.plot(point_is_s, point_is_h, 'ys-')
plt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')
_title = 'The isentropic efficiency = ' + \
r'$\frac{h1-h2}{h1-h2s}$' + '=' + '{:.2f}'.format(ef) + '%'
plt.legend(loc="best", bbox_to_anchor=[0.5, 0.5],
ncol=2, shadow=True, title=_title)
# annotate some interesting points
plt.annotate('(P1,T1)',
xy=(s1, h1), xycoords='data',
xytext=(+10, +30), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.annotate('(P2,T2)',
xy=(s2, h2), xycoords='data',
xytext=(+10, +30), textcoords='offset points', fontsize=12,
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
plt.xlabel('s(kJ/(kg.K))')
plt.ylabel('h(kJ/kg)')
plt.show()
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nThis is a simple sample for seuif.py\n\nLicense: this code is in the public domain\n\nAuthor: Cheng Maohua\nEmail: [email protected]\n\nLast modified: 2016.4.20\n\n\"\"\"\nfrom seuif97 import *\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\np1,t1 = 16, 535\np2,t2 = 3.56,315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\n\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\n\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\n\nef = ief(p1, t1, p2, t2)\n\n# print('The isentropic efficiency is ',ef)\n\n# 4条线:p1、p2 等压,等熵焓降线、膨胀线\n\nsamp = 0.01\n\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\n\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\n\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\n\n# p2\nsmp2 = s1 - samp # 等熵焓降点延伸\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\n\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\n\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\n\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\n\n# 等熵焓降\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\n\n# HP Expansion Line\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\n\nplt.plot(point_p1_s, point_p1_h, 'bs-')\n\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n\n_title = 'The isentropic efficiency = ' + \\\n r'$\\frac{h1-h2}{h1-h2s}$' + '=' + '{:.2f}'.format(ef) + '%'\n\nplt.legend(loc=\"best\", bbox_to_anchor=[0.5, 0.5],\n ncol=2, shadow=True, title=_title)\n\n# annotate some interesting points\nplt.annotate('(P1,T1)',\n xy=(s1, h1), xycoords='data',\n xytext=(+10, +30), textcoords='offset points', fontsize=12,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n\nplt.annotate('(P2,T2)',\n xy=(s2, h2), xycoords='data',\n xytext=(+10, +30), textcoords='offset points', fontsize=12,\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3,rad=.2\"))\n\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"<docstring token>\nfrom seuif97 import *\nimport matplotlib.pyplot as plt\nimport numpy as np\np1, t1 = 16, 535\np2, t2 = 3.56, 315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\nef = ief(p1, t1, p2, t2)\nsamp = 0.01\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\nsmp2 = s1 - samp\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n_title = ('The isentropic efficiency = ' + '$\\\\frac{h1-h2}{h1-h2s}$' + '=' +\n '{:.2f}'.format(ef) + '%')\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"<docstring token>\n<import token>\np1, t1 = 16, 535\np2, t2 = 3.56, 315\nh1 = pt2h(p1, t1)\ns1 = pt2s(p1, t1)\nh2 = pt2h(p2, t2)\ns2 = pt2s(p2, t2)\nh2s = ps2h(p2, s1)\nhis = ishd(p1, t1, p2)\nef = ief(p1, t1, p2, t2)\nsamp = 0.01\nsmp1 = s1 - samp\nhsmp1 = ps2h(p1, smp1)\nsap1 = s1 + samp\nhsap1 = ps2h(p1, sap1)\nsmt1 = s1 - samp\nhsmt1 = ps2h(p1, smp1)\nsat1 = s1 + samp\nhsat1 = ts2h(t1, sap1)\npoint_p1_h = np.zeros(shape=3)\npoint_p1_h[0] = hsmp1\npoint_p1_h[1] = h1\npoint_p1_h[2] = hsap1\npoint_p1_s = np.zeros(shape=3)\npoint_p1_s[0] = smp1\npoint_p1_s[1] = s1\npoint_p1_s[2] = sap1\nsmp2 = s1 - samp\nhsmp2 = ps2h(p2, smp2)\nsap2 = s2 + samp\nhsap2 = ps2h(p2, sap2)\nsmt2 = s2 - samp\nhsmt2 = ps2h(p1, smp1)\nsat2 = s2 + samp\nhsat2 = ts2h(t2, sap1)\npoint_p2_h = np.zeros(shape=3)\npoint_p2_h[0] = hsmp2\npoint_p2_h[1] = h2\npoint_p2_h[2] = hsap2\npoint_p2_s = np.zeros(shape=3)\npoint_p2_s[0] = smp2\npoint_p2_s[1] = s2\npoint_p2_s[2] = sap2\npoint_is_h = np.zeros(shape=2)\npoint_is_h[0] = h1\npoint_is_h[1] = h2s\npoint_is_s = np.zeros(shape=2)\npoint_is_s[0] = s1\npoint_is_s[1] = s1\npoint_hp_h = np.zeros(shape=2)\npoint_hp_h[0] = h1\npoint_hp_h[1] = h2\npoint_hp_s = np.zeros(shape=2)\npoint_hp_s[0] = s1\npoint_hp_s[1] = s2\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n_title = ('The isentropic efficiency = ' + '$\\\\frac{h1-h2}{h1-h2s}$' + '=' +\n '{:.2f}'.format(ef) + '%')\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"<docstring token>\n<import token>\n<assignment token>\nplt.plot(point_p1_s, point_p1_h, 'bs-')\nplt.plot(point_p2_s, point_p2_h, 'bs-')\nplt.plot(point_is_s, point_is_h, 'ys-')\nplt.plot(point_hp_s, point_hp_h, 'rs-', label='Expansion Line')\n<assignment token>\nplt.legend(loc='best', bbox_to_anchor=[0.5, 0.5], ncol=2, shadow=True,\n title=_title)\nplt.annotate('(P1,T1)', xy=(s1, h1), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.annotate('(P2,T2)', xy=(s2, h2), xycoords='data', xytext=(+10, +30),\n textcoords='offset points', fontsize=12, arrowprops=dict(arrowstyle=\n '->', connectionstyle='arc3,rad=.2'))\nplt.xlabel('s(kJ/(kg.K))')\nplt.ylabel('h(kJ/kg)')\nplt.show()\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
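The script above draws the expansion line on an h-s diagram and takes the isentropic efficiency from seuif97's ief(). As a quick hand-check of the formula shown in the plot title, here is a minimal sketch computing eta = (h1 - h2) / (h1 - h2s) in percent from plain numbers; the three enthalpies below are made-up placeholders, not values returned by seuif97.

# Hand-check of the isentropic efficiency formula from the plot title:
# eta = (h1 - h2) / (h1 - h2s), reported in percent.
h1 = 3404.0   # assumed inlet enthalpy, kJ/kg (placeholder)
h2 = 3036.0   # assumed actual outlet enthalpy, kJ/kg (placeholder)
h2s = 2968.0  # assumed isentropic outlet enthalpy, kJ/kg (placeholder)

eta = 100.0 * (h1 - h2) / (h1 - h2s)
print('The isentropic efficiency = {:.2f}%'.format(eta))  # ~84.40% for these numbers
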
573 |
906265182a9776fec5bad41bfc9ee68b36873d1e
|
#예외처리 문법을 활용하여 정수가 아닌 숫자를 입력했을때 에러문구가나오도록 작성.(에러문구:정수가아닙니다)
try:
x = int(input('정수를 입력하세요: '))
print(x)
except:
print('정수가 아닙니다.')
|
[
"#예외처리 문법을 활용하여 정수가 아닌 숫자를 입력했을때 에러문구가나오도록 작성.(에러문구:정수가아닙니다)\n\ntry:\n x = int(input('정수를 입력하세요: '))\n print(x)\n \nexcept:\n print('정수가 아닙니다.')\n",
"try:\n x = int(input('정수를 입력하세요: '))\n print(x)\nexcept:\n print('정수가 아닙니다.')\n",
"<code token>\n"
] | false |
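The snippet above uses a bare except, which catches every exception rather than just the failed int() conversion. A slightly more targeted sketch that catches only ValueError and re-prompts until the input parses; the retry loop and the English prompts are illustrative choices, not part of the original.

# Sketch: catch only the conversion error and keep asking until an integer is entered.
while True:
    try:
        x = int(input('Enter an integer: '))
    except ValueError:
        print('Not an integer.')
    else:
        print(x)
        break
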
574 |
b7aa99e9e4af3bef4b2b3e7d8ab9bf159a093af6
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (C) 2011 Lionel Bergeret
#
# ----------------------------------------------------------------
# The contents of this file are distributed under the CC0 license.
# See http://creativecommons.org/publicdomain/zero/1.0/
# ----------------------------------------------------------------
import os
import re
import cPickle
from optparse import OptionParser
# Import shapefile informations
from shapelib import ShapeFile
import dbflib
# Shapely
from shapely.geometry import Polygon
from shapely.ops import cascaded_union
# Numpy and matplotlib
import numpy as np
from matplotlib.nxutils import points_inside_poly
try:
import psyco
psyco.full()
except ImportError:
print "Psyco plugin missing, will run slower"
pass
def main(shapefile, picklefile):
if picklefile:
[npts, x, y, z, zraw, xil, yil, grid, missing] = cPickle.load(open(picklefile,'rb'))
points = np.vstack((x,y)).T
# Load administrative area
shp = ShapeFile(shapefile)
dbf = dbflib.open(shapefile)
coastline = []
# Process every shape from the ShapeFile
print "Processing shapes ..."
for npoly in range(shp.info()[0]):
shp_object = shp.read_object(npoly)
shp_dict = dbf.read_record(npoly)
verts = shp_object.vertices()
if "NAME_1" in shp_dict:
name = "%s" % (shp_dict["NAME_1"])
else:
name = "Unknown"
print "Processing %s" % (name)
# Extract city polygon vertices (ring per ring)
for ring in verts:
vx = []
vy = []
for point in ring:
vx.append(point[0])
vy.append(point[1])
# Only process big enough rings
if len(vx) > 256: # big enough
poly_verts = zip(vx,vy)
if picklefile:
# Compute intersections with the city
intersection = points_inside_poly(points, poly_verts)
npts = sum(1 for x in points_inside_poly(points, poly_verts) if x)
else:
npts = 1 # Add this polygon
# Add the ring to the coastine if measures inside
if npts > 0:
polygon = Polygon(poly_verts)
if not polygon.is_empty and polygon.is_valid:
print "- Add polygon (%d)" % (len(vx))
coastline.append(polygon)
else:
print "- Skip polygon (%d)" % (len(vx))
print "Union of %d polygons" % len(coastline)
coast = cascaded_union(coastline)
cPickle.dump(coast,open('coastline.pickle','wb'),-1)
print "Done."
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == '__main__':
parser = OptionParser("Usage: safecastCoastline <shapefile>")
parser.add_option("-s", "--safecast", dest="scfilename",
help="provice the safecast.pickle file for intersections.", metavar="FILE")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Wrong number of arguments")
main(args[0], options.scfilename)
|
[
"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Copyright (C) 2011 Lionel Bergeret\n#\n# ----------------------------------------------------------------\n# The contents of this file are distributed under the CC0 license.\n# See http://creativecommons.org/publicdomain/zero/1.0/\n# ----------------------------------------------------------------\n\nimport os\nimport re\nimport cPickle\nfrom optparse import OptionParser\n\n# Import shapefile informations\nfrom shapelib import ShapeFile\nimport dbflib\n\n# Shapely\nfrom shapely.geometry import Polygon\nfrom shapely.ops import cascaded_union\n\n# Numpy and matplotlib\nimport numpy as np\nfrom matplotlib.nxutils import points_inside_poly\n\ntry:\n import psyco\n psyco.full()\nexcept ImportError:\n print \"Psyco plugin missing, will run slower\"\n pass\n\ndef main(shapefile, picklefile):\n if picklefile:\n [npts, x, y, z, zraw, xil, yil, grid, missing] = cPickle.load(open(picklefile,'rb'))\n points = np.vstack((x,y)).T\n\n # Load administrative area\n shp = ShapeFile(shapefile)\n dbf = dbflib.open(shapefile)\n\n coastline = []\n\n # Process every shape from the ShapeFile\n print \"Processing shapes ...\"\n for npoly in range(shp.info()[0]):\n shp_object = shp.read_object(npoly)\n shp_dict = dbf.read_record(npoly)\n verts = shp_object.vertices()\n\n if \"NAME_1\" in shp_dict:\n name = \"%s\" % (shp_dict[\"NAME_1\"])\n else:\n name = \"Unknown\"\n\n print \"Processing %s\" % (name)\n # Extract city polygon vertices (ring per ring)\n for ring in verts:\n vx = []\n vy = []\n for point in ring:\n vx.append(point[0])\n vy.append(point[1])\n\n # Only process big enough rings\n if len(vx) > 256: # big enough\n poly_verts = zip(vx,vy)\n\n if picklefile:\n # Compute intersections with the city\n intersection = points_inside_poly(points, poly_verts)\n npts = sum(1 for x in points_inside_poly(points, poly_verts) if x)\n else:\n npts = 1 # Add this polygon\n\n # Add the ring to the coastine if measures inside\n if npts > 0:\n polygon = Polygon(poly_verts)\n if not polygon.is_empty and polygon.is_valid:\n print \"- Add polygon (%d)\" % (len(vx))\n coastline.append(polygon)\n else:\n print \"- Skip polygon (%d)\" % (len(vx))\n \n print \"Union of %d polygons\" % len(coastline)\n coast = cascaded_union(coastline)\n cPickle.dump(coast,open('coastline.pickle','wb'),-1)\n print \"Done.\"\n\n# -----------------------------------------------------------------------------\n# Main\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n parser = OptionParser(\"Usage: safecastCoastline <shapefile>\")\n parser.add_option(\"-s\", \"--safecast\", dest=\"scfilename\",\n help=\"provice the safecast.pickle file for intersections.\", metavar=\"FILE\")\n\n (options, args) = parser.parse_args()\n \n if len(args) != 1:\n parser.error(\"Wrong number of arguments\")\n\n main(args[0], options.scfilename)\n"
] | true |
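The script above depends on matplotlib.nxutils.points_inside_poly, an API that was removed from matplotlib long ago (which is also why this record is flagged as erroring). A small sketch of the same point-in-polygon test with matplotlib.path.Path.contains_points, its usual replacement, run on a hand-made square; the polygon and the points are placeholders, not Safecast measurements or shapefile rings.

# Point-in-polygon check with matplotlib.path.Path, the usual replacement for the
# removed matplotlib.nxutils.points_inside_poly used in the script above.
import numpy as np
from matplotlib.path import Path

poly_verts = [(0.0, 0.0), (4.0, 0.0), (4.0, 4.0), (0.0, 4.0)]  # assumed square ring
points = np.array([[1.0, 1.0], [5.0, 1.0], [2.0, 3.5]])        # placeholder points

inside = Path(poly_verts).contains_points(points)
print(inside)                              # [ True False  True]
print(int(inside.sum()), 'points inside')  # 2 points inside
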
575 |
aa4d872c6a529d8acf18f1c3b477bc1816ac2887
|
adict ={'name':'bob','age':23}
print('bob' in adict)
print('name'in adict)
for key in adict:
print('%s:%s'%(key,adict[key]))
print('%(name)s:%(age)s'%adict)
|
[
"adict ={'name':'bob','age':23}\nprint('bob' in adict)\nprint('name'in adict)\nfor key in adict:\n print('%s:%s'%(key,adict[key]))\nprint('%(name)s:%(age)s'%adict)",
"adict = {'name': 'bob', 'age': 23}\nprint('bob' in adict)\nprint('name' in adict)\nfor key in adict:\n print('%s:%s' % (key, adict[key]))\nprint('%(name)s:%(age)s' % adict)\n",
"<assignment token>\nprint('bob' in adict)\nprint('name' in adict)\nfor key in adict:\n print('%s:%s' % (key, adict[key]))\nprint('%(name)s:%(age)s' % adict)\n",
"<assignment token>\n<code token>\n"
] | false |
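The snippet above checks key membership and formats with %-placeholders. An equivalent sketch using dict.items() and f-strings, shown for comparison only; note that the in test looks at keys, which is why 'bob' in adict prints False.

# Same checks with dict.items() and f-strings instead of %-formatting.
adict = {'name': 'bob', 'age': 23}
print('bob' in adict)     # False: membership tests keys, not values
print('name' in adict)    # True
for key, value in adict.items():
    print(f'{key}:{value}')
print(f"{adict['name']}:{adict['age']}")
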
576 |
ebc050544da69837cc2b8977f347380b94474bab
|
import os
import numpy as np
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, concatenate
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.utils import np_utils
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, f1_score
import seaborn as sns
from keras.layers import Input, Dense, Add, Multiply
# macOS特有の警告文を非表示(GPUがないからCPUでやるときに出る)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# パラメータの初期化
classes = [
"normal cells",
"blasts",
"blasts_highSSC_granulocytes",
"blasts_highSSC_middle_ugly",
"blasts_highSSC_upper_dead",
]
num_classes = len(classes)
image_size = 66
# データの読み込み
imagefiles = np.load("imagefiles_supplementary.npz")
X_train = imagefiles['X_train']
X_test = imagefiles['X_test']
y_train = imagefiles['y_train']
y_test = imagefiles['y_test']
# グレースケール画像をCNNに入力するための次元操作
X_train = X_train.reshape((-1, image_size, image_size, 1))
X_test = X_test.reshape((-1, image_size, image_size, 1))
# データの正規化
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# OneHotVector化する(正解ラベルの位置に1がつく)
y_train = np_utils.to_categorical(y_train, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
def _build(_input, *nodes):
x = _input
for node in nodes:
if callable(node):
x = node(x)
elif isinstance(node, list):
x = [_build(x, branch) for branch in node]
elif isinstance(node, tuple):
x = _build(x, *node)
else:
x = node
return x
_input = Input(X_train.shape[1:])
output = _build(
_input,
# Reduction dual-path module×3の定義
# ---------------------------
# 畳み込み層の追加(96:フィルタ数)
# バッチ正規化
# 活性化関数:ReLu
# ---------------------------
# MaxPooling
# ---------------------------
# Reduction dual-path module1
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
# Reduction dual-path module2
Add(),
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
# Reduction dual-path module3
Add(),
[(Conv2D(96, (3, 3), strides=(2, 2)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu')),
MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],
# Dual-path modules×10の定義
# ---------------------------
# 畳み込み層の追加(112:フィルタ数)
# バッチ正規化
# 活性化関数:ReLu
# ---------------------------
# Dual-path modules2の定義
# 畳み込み層の追加(48:フィルタ数)
# バッチ正規化
# 活性化関数:ReLu
# ---------------------------
# Dual-path modules1
Add(),
[(Conv2D(112, (1, 1), strides=(1, 1)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu'),
),
(Conv2D(48, (3, 3), strides=(1, 1)),
BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
beta_initializer='zeros', gamma_initializer='ones',
moving_mean_initializer='zeros', moving_variance_initializer='ones',
beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
gamma_constraint=None),
Activation('relu'),
)],
# # Dual-path modules2
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules3
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules4
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules5
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules6
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules7
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules8
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules9
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# # Dual-path modules10
# Add(),
# [(Conv2D(112, (1, 1), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu')),
# (Conv2D(48, (3, 3), strides=(1, 1)),
# BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,
# beta_initializer='zeros', gamma_initializer='ones',
# moving_mean_initializer='zeros', moving_variance_initializer='ones',
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,
# gamma_constraint=None),
# Activation('relu'))],
# 全結合
Add(),
[MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),
Flatten(),
Dense(256, activation='relu'),
Dropout(0.5),
Dense(num_classes, activation='softmax')
]
)
model = Model(_input, output)
model.summary()
# # 損失関数の設定
# opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)
# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
#
# # トレーニングの実施
# # 学習
# print("start training")
# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))
# # 評価
# print("start eval")
# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1) # verbose:途中結果表示
# print('Test Loss: ', score[0])
# print('Test Accuracy: ', score[1])
#
# model.save('leukemia_cnn_supplementary.h5')
#
# # 学習の様子をグラフへ描画
# # 正解率の推移をプロット
# fig = plt.figure()
# plt.plot(hist.history['accuracy'])
# plt.plot(hist.history['val_accuracy'])
# plt.title('Accuracy')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')
# plt.close()
# # ロスの推移をプロット
# fig = plt.figure()
# plt.plot(hist.history['loss'])
# plt.plot(hist.history['val_loss'])
# plt.title('Loss')
# plt.legend(['train', 'test'], loc='upper left')
# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')
# plt.close()
# # Confusion matrix作成
# plt.figure()
# y_pred = model.predict(X_test)
# y_test = imagefiles['y_test'] # one hot vector化されているのでロードし直す
# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))
# ticklabels = ["blasts_highSSC_granulocytes",
# "blasts_highSSC_middle_ugly",
# "blasts",
# "normal cells",
# "blasts_highSSC_upper_dead"]
# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)
# plt.ylabel("Correct")
# plt.xlabel("Prediction")
# plt.tight_layout()
# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')
# plt.close()
#
# # F1 micro/macro
# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average="macro")
# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average="micro")
# print(f"f1_macro:{f1_macro}")
# print(f"f1_miro:{f1_micro}")
|
[
"import os\n\n\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\n\n# macOS特有の警告文を非表示(GPUがないからCPUでやるときに出る)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\n# パラメータの初期化\nclasses = [\n \"normal cells\",\n \"blasts\",\n \"blasts_highSSC_granulocytes\",\n \"blasts_highSSC_middle_ugly\",\n \"blasts_highSSC_upper_dead\",\n]\nnum_classes = len(classes)\nimage_size = 66\n\n# データの読み込み\nimagefiles = np.load(\"imagefiles_supplementary.npz\")\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\n# グレースケール画像をCNNに入力するための次元操作\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\n# データの正規化\nX_train = X_train.astype(\"float32\")\nX_test = X_test.astype(\"float32\")\n# OneHotVector化する(正解ラベルの位置に1がつく)\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(\n _input,\n # Reduction dual-path module×3の定義\n # ---------------------------\n # 畳み込み層の追加(96:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # MaxPooling\n # ---------------------------\n # Reduction dual-path module1\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module2\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n # Reduction dual-path module3\n Add(),\n [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu')),\n MaxPooling2D(pool_size=(3, 3), strides=(2, 2))],\n\n # Dual-path modules×10の定義\n # ---------------------------\n # 畳み込み層の追加(112:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # Dual-path modules2の定義\n # 畳み込み層の追加(48:フィルタ数)\n # バッチ正規化\n # 活性化関数:ReLu\n # ---------------------------\n # 
Dual-path modules1\n Add(),\n [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n ),\n (Conv2D(48, (3, 3), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None),\n Activation('relu'),\n )],\n # # Dual-path modules2\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules3\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules4\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules5\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, 
beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules6\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules7\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules8\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules9\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n 
# beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # # Dual-path modules10\n # Add(),\n # [(Conv2D(112, (1, 1), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu')),\n # (Conv2D(48, (3, 3), strides=(1, 1)),\n # BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True, scale=True,\n # beta_initializer='zeros', gamma_initializer='ones',\n # moving_mean_initializer='zeros', moving_variance_initializer='ones',\n # beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n # gamma_constraint=None),\n # Activation('relu'))],\n # 全結合\n Add(),\n [MaxPooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format=None),\n Flatten(),\n Dense(256, activation='relu'),\n Dropout(0.5),\n Dense(num_classes, activation='softmax')\n ]\n)\nmodel = Model(_input, output)\nmodel.summary()\n\n# # 損失関数の設定\n# opt = SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False)\n# model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])\n#\n# # トレーニングの実施\n# # 学習\n# print(\"start training\")\n# hist = model.fit(X_train, y_train, batch_size=32, epochs=30, validation_data=(X_test, y_test))\n# # 評価\n# print(\"start eval\")\n# score = model.evaluate(X_test, y_test, batch_size=32, verbose=1) # verbose:途中結果表示\n# print('Test Loss: ', score[0])\n# print('Test Accuracy: ', score[1])\n#\n# model.save('leukemia_cnn_supplementary.h5')\n#\n# # 学習の様子をグラフへ描画\n# # 正解率の推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['accuracy'])\n# plt.plot(hist.history['val_accuracy'])\n# plt.title('Accuracy')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_accuracy_supplementary.png')\n# plt.close()\n# # ロスの推移をプロット\n# fig = plt.figure()\n# plt.plot(hist.history['loss'])\n# plt.plot(hist.history['val_loss'])\n# plt.title('Loss')\n# plt.legend(['train', 'test'], loc='upper left')\n# fig.savefig('result/cnn_supplementary/cnn_loss_supplementary.png')\n# plt.close()\n# # Confusion matrix作成\n# plt.figure()\n# y_pred = model.predict(X_test)\n# y_test = imagefiles['y_test'] # one hot vector化されているのでロードし直す\n# cm = confusion_matrix(y_test, np.argmax(y_pred, axis=1))\n# ticklabels = [\"blasts_highSSC_granulocytes\",\n# \"blasts_highSSC_middle_ugly\",\n# \"blasts\",\n# \"normal cells\",\n# \"blasts_highSSC_upper_dead\"]\n# sns.heatmap(cm, annot=True, cmap='Blues', yticklabels=ticklabels, xticklabels=ticklabels)\n# plt.ylabel(\"Correct\")\n# plt.xlabel(\"Prediction\")\n# plt.tight_layout()\n# plt.savefig('result/cnn_supplementary/confusion_matrix_cnn_supplementary.png')\n# plt.close()\n#\n# # F1 micro/macro\n# f1_macro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"macro\")\n# f1_micro = f1_score(y_test, np.argmax(y_pred, axis=1), average=\"micro\")\n# print(f\"f1_macro:{f1_macro}\")\n# print(f\"f1_miro:{f1_micro}\")\n",
"import os\nimport numpy as np\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, concatenate\nfrom keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D, Activation\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import SGD\nfrom keras.utils import np_utils\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, f1_score\nimport seaborn as sns\nfrom keras.layers import Input, Dense, Add, Multiply\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n Activation('relu'))], 
Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel = Model(_input, output)\nmodel.summary()\n",
"<import token>\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\nclasses = ['normal cells', 'blasts', 'blasts_highSSC_granulocytes',\n 'blasts_highSSC_middle_ugly', 'blasts_highSSC_upper_dead']\nnum_classes = len(classes)\nimage_size = 66\nimagefiles = np.load('imagefiles_supplementary.npz')\nX_train = imagefiles['X_train']\nX_test = imagefiles['X_test']\ny_train = imagefiles['y_train']\ny_test = imagefiles['y_test']\nX_train = X_train.reshape((-1, image_size, image_size, 1))\nX_test = X_test.reshape((-1, image_size, image_size, 1))\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\ny_train = np_utils.to_categorical(y_train, num_classes)\ny_test = np_utils.to_categorical(y_test, num_classes)\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n_input = Input(X_train.shape[1:])\noutput = _build(_input, [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(96, (3, 3), strides=(2, 2)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), MaxPooling2D(pool_size=(3,\n 3), strides=(2, 2))], Add(), [(Conv2D(112, (1, 1), strides=(1, 1)),\n BatchNormalization(axis=-1, momentum=0.99, epsilon=0.001, center=True,\n scale=True, beta_initializer='zeros', gamma_initializer='ones',\n moving_mean_initializer='zeros', moving_variance_initializer='ones',\n beta_regularizer=None, gamma_regularizer=None, beta_constraint=None,\n gamma_constraint=None), Activation('relu')), (Conv2D(48, (3, 3),\n strides=(1, 1)), BatchNormalization(axis=-1, momentum=0.99, epsilon=\n 0.001, center=True, scale=True, beta_initializer='zeros',\n gamma_initializer='ones', moving_mean_initializer='zeros',\n moving_variance_initializer='ones', beta_regularizer=None,\n gamma_regularizer=None, beta_constraint=None, gamma_constraint=None),\n Activation('relu'))], Add(), [MaxPooling2D(pool_size=(2, 2), strides=\n None, padding='valid', data_format=None), Flatten(), Dense(256,\n activation='relu'), Dropout(0.5), Dense(num_classes, activation='softmax')]\n )\nmodel = Model(_input, output)\nmodel.summary()\n",
"<import token>\n<assignment token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<assignment token>\nmodel.summary()\n",
"<import token>\n<assignment token>\n\n\ndef _build(_input, *nodes):\n x = _input\n for node in nodes:\n if callable(node):\n x = node(x)\n elif isinstance(node, list):\n x = [_build(x, branch) for branch in node]\n elif isinstance(node, tuple):\n x = _build(x, *node)\n else:\n x = node\n return x\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
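The interesting piece of the model script above is the _build helper: callables in the spec are applied in sequence, a list fans out into parallel branches, a tuple is a nested sub-sequence, and whatever follows a list (here Add()) receives the list of branch outputs. Below is a dependency-free sketch of that composition rule using plain functions instead of Keras layers, so the control flow can be checked without TensorFlow; the arithmetic functions are invented purely for illustration.

# The _build composition rule demonstrated with plain functions:
# callables chain, a list creates branches, a tuple nests a sub-chain,
# and the element after a list is applied to the list of branch results.
def _build(_input, *nodes):
    x = _input
    for node in nodes:
        if callable(node):
            x = node(x)
        elif isinstance(node, list):
            x = [_build(x, branch) for branch in node]
        elif isinstance(node, tuple):
            x = _build(x, *node)
        else:
            x = node
    return x

add1 = lambda v: v + 1
double = lambda v: v * 2
merge_sum = lambda parts: sum(parts)       # plays the role of Add() on the branch list

out = _build(3,
             add1,                          # 3 -> 4
             [(add1, double), double],      # branches: (4 + 1) * 2 = 10 and 4 * 2 = 8
             merge_sum)                     # 10 + 8 = 18
print(out)  # 18
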
577 |
ed2f3bbc7eb0a4d8f5ccdb7a12e00cbddab04dd0
|
_method_adaptors = dict()
def register_dist_adaptor(method_name):
def decorator(func):
_method_adaptors[method_name] = func
def wrapper(*args, **kwargs):
func(*args, **kwargs)
return wrapper
return decorator
def get_nearest_method(method_name, parser):
"""
all candidates toked
all protocol untoked
input:
queries:
[
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
(protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)
]
output:
[
nearest_idx1,
nearest_idx2,
nearest_idx3,
...
]
"""
return _method_adaptors[method_name](parser)
def get_method_names():
return list(_method_adaptors.keys())
|
[
"_method_adaptors = dict()\n\ndef register_dist_adaptor(method_name):\n def decorator(func):\n _method_adaptors[method_name] = func\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper\n return decorator\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n ",
"_method_adaptors = dict()\n\n\ndef register_dist_adaptor(method_name):\n\n def decorator(func):\n _method_adaptors[method_name] = func\n\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper\n return decorator\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"<assignment token>\n\n\ndef register_dist_adaptor(method_name):\n\n def decorator(func):\n _method_adaptors[method_name] = func\n\n def wrapper(*args, **kwargs):\n func(*args, **kwargs)\n return wrapper\n return decorator\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"<assignment token>\n<function token>\n\n\ndef get_nearest_method(method_name, parser):\n \"\"\"\n all candidates toked\n all protocol untoked\n input:\n queries:\n [\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n (protocol, (candidate, sen_id, start, K), (candidate, sen_id, start, K), ...)\n ]\n output:\n [\n nearest_idx1,\n nearest_idx2,\n nearest_idx3,\n ...\n ]\n \"\"\"\n return _method_adaptors[method_name](parser)\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"<assignment token>\n<function token>\n<function token>\n\n\ndef get_method_names():\n return list(_method_adaptors.keys())\n",
"<assignment token>\n<function token>\n<function token>\n<function token>\n"
] | false |
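The module above is a small dispatch registry: register_dist_adaptor files each decorated function under a method name, and get_nearest_method looks it up and calls it with the parser. A minimal usage sketch, self-contained so it runs on its own; the 'dummy' method name, the fake adaptor, and the query tuple are invented for illustration.

# Usage sketch for the registry pattern above (the registered method is illustrative).
_method_adaptors = dict()

def register_dist_adaptor(method_name):
    def decorator(func):
        _method_adaptors[method_name] = func
        def wrapper(*args, **kwargs):
            func(*args, **kwargs)
        return wrapper
    return decorator

def get_nearest_method(method_name, parser):
    return _method_adaptors[method_name](parser)

@register_dist_adaptor('dummy')
def dummy_adaptor(parser):
    # A real adaptor would return a function mapping queries to nearest indices.
    return lambda queries: [0 for _ in queries]

method = get_nearest_method('dummy', parser=None)
print(method([('protocol', ('candidate', 0, 0, 5))]))  # -> [0]
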
578 |
837534ebc953dae966154921709398ab2b2e0b33
|
# © MNELAB developers
#
# License: BSD (3-clause)
from .dependencies import have
from .syntax import PythonHighlighter
from .utils import count_locations, image_path, interface_style, natural_sort
|
[
"# © MNELAB developers\n#\n# License: BSD (3-clause)\n\nfrom .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"from .dependencies import have\nfrom .syntax import PythonHighlighter\nfrom .utils import count_locations, image_path, interface_style, natural_sort\n",
"<import token>\n"
] | false |
579 |
29c630b56eb56d91d1e917078138a2bbf562e0bf
|
import pyhs2
import sys
import datetime
i = datetime.datetime.now()
# args
if len(sys.argv) < 2:
print "Run with python version 2.6"
print "Requires arg: <orgId>"
sys.exit()
orgId = sys.argv[1]
print "\n\nCreating document external ID manifest for Org ID: " + orgId
## strings
fileLine = "%s\t%s\t%s\n" #external_id doc_source assign_authority
query = """select * from (select external_id, doc_source, assign_authority from summary_doc_manifest where org_id = '%s'
UNION ALL select get_json_object(line, '$.document.id') as external_id, get_json_object(line, '$.document.source') as doc_source,
get_json_object(line, '$.document.assignAuthority') as assign_authority from production_logs_datacheckandrecover_epoch
where get_json_object(line, '$.docManifest') is not null and get_json_object(line, '$.orgId') = '%s'
and day=%s and month=%s and year=2014) joined_table""" %(orgId, orgId, i.day, i.month)
fileName = orgId + "-manifest"
## hive connection
conn = pyhs2.connect(host='10.196.47.205',
port=10000,
authMechanism="PLAIN",
user='hive',
password='',
database='default')
cur = conn.cursor()
count = 0
print "Executing query: " + query
cur.execute(query)
print "Building query results..."
out = open(fileName, "w")
for row in cur.fetch():
out.write(fileLine%(row[0], row[1], row[2]))
count+=1
if count%1000000 == 0:
print "...wrote " + str(count) + " entries so far."
out.close()
print "...wrote " + str(count) + " entries into the file: " + fileName
print "\n"
|
[
"import pyhs2\nimport sys\nimport datetime\ni = datetime.datetime.now()\n\n# args\nif len(sys.argv) < 2:\n print \"Run with python version 2.6\"\n print \"Requires arg: <orgId>\"\n sys.exit()\n\norgId = sys.argv[1]\n\nprint \"\\n\\nCreating document external ID manifest for Org ID: \" + orgId\n\n## strings\nfileLine = \"%s\\t%s\\t%s\\n\" #external_id doc_source assign_authority\nquery = \"\"\"select * from (select external_id, doc_source, assign_authority from summary_doc_manifest where org_id = '%s'\n\t\tUNION ALL select get_json_object(line, '$.document.id') as external_id, get_json_object(line, '$.document.source') as doc_source,\n\t\tget_json_object(line, '$.document.assignAuthority') as assign_authority from production_logs_datacheckandrecover_epoch\n\t\twhere get_json_object(line, '$.docManifest') is not null and get_json_object(line, '$.orgId') = '%s'\n\t\tand day=%s and month=%s and year=2014) joined_table\"\"\" %(orgId, orgId, i.day, i.month) \nfileName = orgId + \"-manifest\"\n\n## hive connection\nconn = pyhs2.connect(host='10.196.47.205',\n port=10000,\n authMechanism=\"PLAIN\",\n user='hive',\n password='',\n database='default')\ncur = conn.cursor()\n\ncount = 0\n\nprint \"Executing query: \" + query\ncur.execute(query)\n\nprint \"Building query results...\"\nout = open(fileName, \"w\")\nfor row in cur.fetch():\n out.write(fileLine%(row[0], row[1], row[2]))\n count+=1\n if count%1000000 == 0:\n print \"...wrote \" + str(count) + \" entries so far.\"\nout.close()\n\nprint \"...wrote \" + str(count) + \" entries into the file: \" + fileName\nprint \"\\n\""
] | true |
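The entry above is flagged error=true because it is Python 2 (bare print statements), but the core pattern it implements is simple: stream query rows into a manifest file while reporting progress every N rows. A small Python 3 sketch of that pattern follows; fetch_rows() is a hypothetical stand-in for the Hive cursor's fetch(), and the file name, row count, and reporting interval are illustrative only.

def fetch_rows():
    # Hypothetical stand-in for `cur.fetch()`: yields
    # (external_id, doc_source, assign_authority) tuples.
    for n in range(2500):
        yield ("ext-%d" % n, "source", "authority")

file_line = "%s\t%s\t%s\n"  # external_id  doc_source  assign_authority
file_name = "org-manifest"
count = 0
with open(file_name, "w") as out:
    for row in fetch_rows():
        out.write(file_line % (row[0], row[1], row[2]))
        count += 1
        if count % 1000 == 0:
            print("...wrote %d entries so far." % count)
print("...wrote %d entries into the file: %s" % (count, file_name))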
580 |
ce26ad27b7729164e27c845e2803a670b506bad8
|
# -*- coding: utf-8 -*-
import scrapy
class QuoteesxtractorSpider(scrapy.Spider):
name = 'quoteEsxtractor'
allowed_domains = ['quotes.toscrape.com']
start_urls = ['http://quotes.toscrape.com/']
def parse(self, response):
for quote in response.css('.quote') :
# print(quote.getall())
result = {
"text": quote.css('span.text::text').get(),
"author": quote.css('small.author::text').get(),
"tags": quote.css('div.tags a.tag::text').getall()
}
yield result
|
[
"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass QuoteesxtractorSpider(scrapy.Spider):\n name = 'quoteEsxtractor'\n allowed_domains = ['quotes.toscrape.com']\n start_urls = ['http://quotes.toscrape.com/']\n\n def parse(self, response):\n for quote in response.css('.quote') :\n # print(quote.getall())\n result = {\n \"text\": quote.css('span.text::text').get(),\n \"author\": quote.css('small.author::text').get(),\n \"tags\": quote.css('div.tags a.tag::text').getall()\n }\n yield result",
"import scrapy\n\n\nclass QuoteesxtractorSpider(scrapy.Spider):\n name = 'quoteEsxtractor'\n allowed_domains = ['quotes.toscrape.com']\n start_urls = ['http://quotes.toscrape.com/']\n\n def parse(self, response):\n for quote in response.css('.quote'):\n result = {'text': quote.css('span.text::text').get(), 'author':\n quote.css('small.author::text').get(), 'tags': quote.css(\n 'div.tags a.tag::text').getall()}\n yield result\n",
"<import token>\n\n\nclass QuoteesxtractorSpider(scrapy.Spider):\n name = 'quoteEsxtractor'\n allowed_domains = ['quotes.toscrape.com']\n start_urls = ['http://quotes.toscrape.com/']\n\n def parse(self, response):\n for quote in response.css('.quote'):\n result = {'text': quote.css('span.text::text').get(), 'author':\n quote.css('small.author::text').get(), 'tags': quote.css(\n 'div.tags a.tag::text').getall()}\n yield result\n",
"<import token>\n\n\nclass QuoteesxtractorSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def parse(self, response):\n for quote in response.css('.quote'):\n result = {'text': quote.css('span.text::text').get(), 'author':\n quote.css('small.author::text').get(), 'tags': quote.css(\n 'div.tags a.tag::text').getall()}\n yield result\n",
"<import token>\n\n\nclass QuoteesxtractorSpider(scrapy.Spider):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
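The spider in the entry above only defines parsing logic; it still needs a crawler to run it. One way to do that outside a full Scrapy project is sketched below, assuming Scrapy >= 2.1, network access to quotes.toscrape.com, and that the spider class is importable (the module name quote_spider and the output file name are arbitrary assumptions).

from scrapy.crawler import CrawlerProcess
from quote_spider import QuoteesxtractorSpider  # hypothetical module holding the spider above

process = CrawlerProcess(settings={
    "FEEDS": {"quotes.json": {"format": "json"}},  # write yielded items to a JSON file
    "LOG_LEVEL": "INFO",
})
process.crawl(QuoteesxtractorSpider)
process.start()  # blocks until the crawl finishes

From the command line, `scrapy runspider <spider file>.py -o quotes.json` achieves the same result.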
581 |
09788cf04ab5190a33b43e3756f4dbd7d78977a5
|
# Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <[email protected]>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines a database for storing graph tuples."""
import datetime
import pathlib
import pickle
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import networkx as nx
import sqlalchemy as sql
from sqlalchemy.dialects import sqlite
from deeplearning.ml4pl import run_id
from deeplearning.ml4pl.graphs import programl_pb2
from deeplearning.ml4pl.graphs.labelled import graph_tuple as graph_tuple_lib
from labm8.py import app
from labm8.py import crypto
from labm8.py import decorators
from labm8.py import humanize
from labm8.py import jsonutil
from labm8.py import progress
from labm8.py import sqlutil
FLAGS = app.FLAGS
# Note we declare a graph_db flag at the bottom of this file, after declaring
# the Database class.
Base = sql.ext.declarative.declarative_base()
class Meta(Base, sqlutil.TablenameFromClassNameMixin):
"""A key-value database metadata store, with additional run ID."""
# Unused integer ID for this row.
id: int = sql.Column(sql.Integer, primary_key=True)
# The run ID that generated this <key,value> pair.
run_id: str = run_id.RunId.SqlStringColumn()
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
# The <key,value> pair.
key: str = sql.Column(sql.String(128), index=True)
pickled_value: bytes = sql.Column(
sqlutil.ColumnTypes.LargeBinary(), nullable=False
)
@property
def value(self) -> Any:
"""De-pickle the column value."""
return pickle.loads(self.pickled_value)
@classmethod
def Create(cls, key: str, value: Any):
"""Construct a table entry."""
return Meta(key=key, pickled_value=pickle.dumps(value))
class GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""A table of graph tuples.
For every GraphTuple, there should be a corresponding GraphTupleData row
containing the pickled graph tuple as a binary blob. The reason for dividing
the data horizontally across two tables is to enable fast scanning
of graph metadata, without needing to churn through a table of pickled binary
blobs.
"""
id: int = sql.Column(sql.Integer, primary_key=True)
# A reference to the 'id' column of a
# deeplearning.ml4pl.ir.ir_database.IntermediateRepresentationFile database
# row. There is no foreign key relationship here because they are separate
# databases.
ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)
# An integer used to split databases of graphs into separate graphs, e.g.
# train/val/test split.
split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)
# The size of the program graph.
node_count: int = sql.Column(sql.Integer, nullable=False)
control_edge_count: int = sql.Column(sql.Integer, nullable=False)
data_edge_count: int = sql.Column(sql.Integer, nullable=False)
call_edge_count: int = sql.Column(sql.Integer, nullable=False)
# The maximum value of the 'position' attribute of edges.
# Although this is an integral value, we store it as a float when using sqlite
# backend because for an unknown reason, sql.func.max(edge_position_max)
# returns a byte array when aggregating over sqlite backend.
edge_position_max: int = sql.Column(
sql.Integer().with_variant(sqlite.FLOAT(), "sqlite"), nullable=False
)
# The dimensionality of node-level features and labels.
node_x_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
node_y_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
# The dimensionality of graph-level features and labels.
graph_x_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
graph_y_dimensionality: int = sql.Column(
sql.Integer, default=0, nullable=False
)
# The size of the pickled graph tuple in bytes.
pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)
# A copy of attributes from the
# deeplearning.ml4pl.graphs.labelled.data_flow_graphs.DataFlowAnnotatedGraph
  # tuple for storing metadata of data flow analysis graphs. If not relevant,
# these columns may be null.
data_flow_steps: int = sql.Column(sql.Integer, nullable=True)
data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)
data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)
timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()
# Create the one-to-one relationship from GraphTuple to GraphTupleData.
data: "GraphTupleData" = sql.orm.relationship(
"GraphTupleData", uselist=False, cascade="all, delete-orphan"
)
@property
def has_data_flow(self) -> bool:
"""Returns whether graph tuple has data flow columns."""
return self.data_flow_steps is not None
@property
def edge_count(self) -> int:
return self.control_edge_count + self.data_edge_count + self.call_edge_count
# Joined table accessors:
@property
def sha1(self) -> str:
"""Return the sha1 of the graph tuple."""
return self.data.sha1
@decorators.memoized_property
def tuple(self) -> graph_tuple_lib.GraphTuple:
"""Un-pickle the graph tuple and cache the binary results."""
return pickle.loads(self.data.pickled_graph_tuple)
def ToFile(self, path: pathlib.Path) -> None:
"""Dump the pickled graph tuple to file.
This is lossy, as the ir_id column is not dumped.
Args:
path: The path of the graph tuple to write.
"""
with open(path, "wb") as f:
pickle.dump(self.tuple, f)
# Factory methods:
@classmethod
def FromFile(cls, path: pathlib.Path, ir_id: int):
"""Construct a mapped database instance from a file generated by ToFile().
Args:
path: The path of the file to read.
ir_id: The IR id of the graph tuple.
Returns:
A GraphTuple instance.
"""
with open(path, "rb") as f:
graph_tuple = pickle.load(f)
return cls.CreateFromGraphTuple(graph_tuple, ir_id)
@classmethod
def CreateFromGraphTuple(
cls,
graph_tuple: graph_tuple_lib.GraphTuple,
ir_id: int,
split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given graph tuple.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
graph_tuple: The graph tuple to map.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
pickled_graph_tuple = pickle.dumps(graph_tuple)
return GraphTuple(
ir_id=ir_id,
split=split,
node_count=graph_tuple.node_count,
control_edge_count=graph_tuple.control_edge_count,
data_edge_count=graph_tuple.data_edge_count,
call_edge_count=graph_tuple.call_edge_count,
edge_position_max=graph_tuple.edge_position_max,
node_x_dimensionality=graph_tuple.node_x_dimensionality,
node_y_dimensionality=graph_tuple.node_y_dimensionality,
graph_x_dimensionality=graph_tuple.graph_x_dimensionality,
graph_y_dimensionality=graph_tuple.graph_y_dimensionality,
pickled_graph_tuple_size=len(pickled_graph_tuple),
data=GraphTupleData(
sha1=crypto.sha1(pickled_graph_tuple),
pickled_graph_tuple=pickled_graph_tuple,
),
)
@classmethod
def CreateFromNetworkX(
cls, g: nx.MultiDiGraph, ir_id: int, split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given networkx graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
g: The networkx graph.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split)
mapped.data_flow_steps = g.graph.get("data_flow_steps")
mapped.data_flow_root_node = g.graph.get("data_flow_root_node")
mapped.data_flow_positive_node_count = g.graph.get(
"data_flow_positive_node_count"
)
return mapped
@classmethod
def CreateEmpty(cls, ir_id: int) -> "GraphTuple":
"""Create an "empty" graph tuple.
An empty graph tuple can be used to signal that the conversion to GraphTuple
failed, and is signalled by a node_count of 0. An empty graph tuple has
no corresponding GraphTupleData row.
"""
return GraphTuple(
ir_id=ir_id,
node_count=0,
control_edge_count=0,
data_edge_count=0,
call_edge_count=0,
edge_position_max=0,
pickled_graph_tuple_size=0,
)
@classmethod
def CreateFromProgramGraph(
cls,
program_graph: programl_pb2.ProgramGraph,
ir_id: int,
split: Optional[int] = None,
) -> "GraphTuple":
"""Create a mapped database instance from the given annotated graph.
This is the preferred method of populating databases of graph tuples, as
it contains the boilerplate to extract and set the metadata columns, and
handles the join between the two data/metadata tables invisibly.
Args:
      program_graph: A ProgramGraph protocol buffer.
ir_id: The intermediate representation ID.
split: The split value of this graph.
Returns:
A GraphTuple instance.
"""
graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(
program_graph
)
mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)
mapped.data_flow_steps = program_graph.data_flow_steps
mapped.data_flow_root_node = program_graph.data_flow_root_node
mapped.data_flow_positive_node_count = (
program_graph.data_flow_positive_node_count
)
return mapped
class GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):
"""The pickled graph tuple data. See GraphTuple for the parent table."""
id: int = sql.Column(
sql.Integer,
sql.ForeignKey("graph_tuples.id", onupdate="CASCADE", ondelete="CASCADE"),
primary_key=True,
)
# The sha1sum of the 'pickled_graph_tuple' column. There is no requirement
# that graph tuples be unique, but, should you wish to enforce this,
# you can group by this sha1 column and prune the duplicates.
sha1: str = sql.Column(sql.String(40), nullable=False, index=True)
# The pickled GraphTuple data.
pickled_graph_tuple: bytes = sql.Column(
sqlutil.ColumnTypes.LargeBinary(), nullable=False
)
# A registry of database statistics, where each entry is a <name, property> tuple.
database_statistics_registry: List[Tuple[str, Callable[["Database"], Any]]] = []
def database_statistic(func):
"""A decorator to mark a method on a Database as a database static.
Database statistics can be accessed using Database.stats_json property to
retrieve a <name, vale> dictionary.
"""
global database_statistics_registry
database_statistics_registry.append((func.__name__, func))
return property(func)
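# Usage sketch for the registry above (the sqlite URL below is only a
# hypothetical example): each method decorated with @database_statistic is
# recorded as a (name, func) pair and wrapped in property(), so a statistic can
# be read directly (e.g. db.graph_count) or collected in bulk via
# Database.stats_json, which walks database_statistics_registry:
#   db = Database("sqlite:////tmp/graphs.db")
#   db.stats_json  # -> {"graph_count": ..., "ir_count": ..., "splits": [...], ...}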
class Database(sqlutil.Database):
"""A database of GraphTuples."""
def __init__(
self,
url: str,
must_exist: bool = False,
ctx: progress.ProgressContext = progress.NullContext,
):
super(Database, self).__init__(url, Base, must_exist=must_exist)
self.ctx = ctx
# Lazily evaluated attributes.
self._graph_tuple_stats = None
self._splits = None
self._split_counts = None
##############################################################################
# Database stats. These are evaluated lazily and the results cached. There is
# no cache invalidation strategy - after modifying the database, you must
# manually call RefreshStats() to ensure that stale stats are re-computed.
##############################################################################
@database_statistic
def graph_count(self) -> int:
"""The number of non-empty graphs in the database."""
return int(self.graph_tuple_stats.graph_count)
@database_statistic
def ir_count(self) -> int:
"""The number of distinct intermediate representations that the non-empty
graphs are constructed from.
"""
return int(self.graph_tuple_stats.ir_count or 0)
@database_statistic
def split_count(self) -> int:
"""The number of distinct splits in the database."""
return int(self.graph_tuple_stats.split_count or 0)
@database_statistic
def node_count(self) -> int:
"""The total node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count or 0)
@database_statistic
def edge_count(self) -> int:
"""The total edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count or 0)
@database_statistic
def control_edge_count(self) -> int:
"""The total control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count or 0)
@database_statistic
def data_edge_count(self) -> int:
"""The total data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count or 0)
@database_statistic
def call_edge_count(self) -> int:
"""The total call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count or 0)
@database_statistic
def node_count_max(self) -> int:
"""The maximum node count in non-empty graphs."""
return int(self.graph_tuple_stats.node_count_max or 0)
@database_statistic
def edge_count_max(self) -> int:
"""The maximum edge count in non-empty graphs."""
return int(self.graph_tuple_stats.edge_count_max or 0)
@database_statistic
def control_edge_count_max(self) -> int:
"""The maximum control edge count in non-empty graphs."""
return int(self.graph_tuple_stats.control_edge_count_max or 0)
@database_statistic
def data_edge_count_max(self) -> int:
"""The maximum data edge count in non-empty graphs."""
return int(self.graph_tuple_stats.data_edge_count_max or 0)
@database_statistic
def call_edge_count_max(self) -> int:
"""The maximum call edge count in non-empty graphs."""
return int(self.graph_tuple_stats.call_edge_count_max or 0)
@database_statistic
def edge_position_max(self) -> int:
"""The maximum edge position in non-empty graphs."""
return int(self.graph_tuple_stats.edge_position_max or 0)
@database_statistic
def node_x_dimensionality(self) -> int:
"""The node x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_x_dimensionality or 0)
@database_statistic
def node_y_dimensionality(self) -> int:
"""The node y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.node_y_dimensionality or 0)
@database_statistic
def graph_x_dimensionality(self) -> int:
"""The graph x dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_x_dimensionality or 0)
@database_statistic
def graph_y_dimensionality(self) -> int:
"""The graph y dimensionality of all non-empty graphs."""
return int(self.graph_tuple_stats.graph_y_dimensionality or 0)
@database_statistic
def graph_data_size(self) -> int:
"""The total size of the non-empty graph data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size or 0)
@database_statistic
def graph_data_size_min(self) -> int:
"""The minimum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_min or 0)
@database_statistic
def graph_data_size_avg(self) -> float:
"""The average size of the non-empty graph tuple data, in bytes."""
return float(self.graph_tuple_stats.graph_data_size_avg or 0)
@database_statistic
def graph_data_size_max(self) -> int:
"""The maximum size of the non-empty graph tuple data, in bytes."""
return int(self.graph_tuple_stats.graph_data_size_max or 0)
@database_statistic
def has_data_flow(self) -> bool:
"""Return whether the graph database has data flow annotations.
    This is only true if *all* rows have data flow values.
"""
return self.graph_count and not self.data_flow_null_count
@database_statistic
def data_flow_null_count(self) -> int:
"""The number of database rows without data flow information.
If > 0, then has_data_flow is False.
"""
return self.graph_count - int(
self.graph_tuple_stats.data_flow_steps_count or 0
)
@database_statistic
def data_flow_steps_min(self) -> Optional[int]:
"""The minimum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_min or 0)
@database_statistic
def data_flow_steps_avg(self) -> Optional[float]:
"""The average data flow steps for non-empty graphs."""
if self.has_data_flow:
return float(self.graph_tuple_stats.data_flow_steps_avg)
@database_statistic
def data_flow_steps_max(self) -> Optional[int]:
"""The maximum data flow steps for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_steps_max or 0)
@database_statistic
def data_flow_positive_node_count_min(self) -> Optional[int]:
"""The minimum data flow positive node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_min or 0)
@database_statistic
def data_flow_positive_node_count_avg(self) -> Optional[int]:
"""The minimum data flow average node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)
@database_statistic
def data_flow_positive_node_count_max(self) -> Optional[int]:
"""The minimum data flow max node count for non-empty graphs."""
if self.has_data_flow:
return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)
@database_statistic
def splits(self) -> List[int]:
"""Return a list of unique split values."""
if self._splits is None:
self.RefreshStats()
return self._splits
@database_statistic
def split_counts(self) -> Dict[int, int]:
"""Return a dictionary mapping split to the number of graphs."""
if self._split_counts is None:
self.RefreshStats()
return self._split_counts
def RefreshStats(self):
"""Compute the database stats for access via the instance properties.
Raises:
ValueError: If the database contains invalid entries, e.g. inconsistent
vector dimensionalities.
"""
with self.ctx.Profile(
2,
lambda t: (
"Computed stats over "
f"{humanize.BinaryPrefix(stats.graph_data_size, 'B')} database "
f"({humanize.Plural(stats.graph_count, 'graph')})"
),
), self.Session() as session:
query = session.query(
# Graph and IR counts.
sql.func.count(GraphTuple.id).label("graph_count"),
sql.func.count(sql.func.distinct(GraphTuple.ir_id)).label("ir_count"),
sql.func.count(sql.func.distinct(GraphTuple.split)).label(
"split_count"
),
# Node and edge attribute sums.
sql.func.sum(GraphTuple.node_count).label("node_count"),
sql.func.sum(GraphTuple.control_edge_count).label("control_edge_count"),
sql.func.sum(GraphTuple.data_edge_count).label("data_edge_count"),
sql.func.sum(GraphTuple.call_edge_count).label("call_edge_count"),
sql.func.sum(
GraphTuple.control_edge_count
+ GraphTuple.data_edge_count
+ GraphTuple.call_edge_count
).label("edge_count"),
# Node and edge attribute maximums.
sql.func.max(GraphTuple.node_count).label("node_count_max"),
sql.func.max(GraphTuple.control_edge_count).label(
"control_edge_count_max"
),
sql.func.max(GraphTuple.data_edge_count).label("data_edge_count_max"),
        sql.func.max(GraphTuple.call_edge_count).label("call_edge_count_max"),
sql.func.max(
GraphTuple.control_edge_count
+ GraphTuple.data_edge_count
+ GraphTuple.call_edge_count
).label("edge_count_max"),
sql.func.max(GraphTuple.edge_position_max).label("edge_position_max"),
# Feature and label dimensionality counts. Each of these columns
# should be one, showing that there is a single value for all graph
# tuples.
sql.func.count(
sql.func.distinct(GraphTuple.node_x_dimensionality)
).label("node_x_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.node_y_dimensionality)
).label("node_y_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.graph_x_dimensionality)
).label("graph_x_dimensionality_count"),
sql.func.count(
sql.func.distinct(GraphTuple.graph_y_dimensionality)
).label("graph_y_dimensionality_count"),
# Feature and label dimensionalities.
sql.func.max(GraphTuple.node_x_dimensionality).label(
"node_x_dimensionality"
),
sql.func.max(GraphTuple.node_y_dimensionality).label(
"node_y_dimensionality"
),
sql.func.max(GraphTuple.graph_x_dimensionality).label(
"graph_x_dimensionality"
),
sql.func.max(GraphTuple.graph_y_dimensionality).label(
"graph_y_dimensionality"
),
# Graph tuple sizes.
sql.func.sum(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size"
),
sql.func.min(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_min"
),
sql.func.avg(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_avg"
),
sql.func.max(GraphTuple.pickled_graph_tuple_size).label(
"graph_data_size_max"
),
# Data flow column null counts.
sql.func.count(GraphTuple.data_flow_steps).label(
"data_flow_steps_count"
),
# Data flow step counts.
sql.func.min(GraphTuple.data_flow_steps).label("data_flow_steps_min"),
sql.func.avg(GraphTuple.data_flow_steps).label("data_flow_steps_avg"),
sql.func.max(GraphTuple.data_flow_steps).label("data_flow_steps_max"),
# Data flow positive node count.
sql.func.min(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_min"
),
sql.func.avg(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_avg"
),
sql.func.max(GraphTuple.data_flow_positive_node_count).label(
"data_flow_positive_node_count_max"
),
)
# Ignore "empty" graphs.
query = query.filter(GraphTuple.node_count > 1)
# Compute the stats.
stats = query.one()
# Check that databases have a consistent value for dimensionalities.
if stats.node_x_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.node_x_dimensionality_count} "
"distinct node x dimensionalities"
)
if stats.node_y_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.node_y_dimensionality_count} "
"distinct node y dimensionalities"
)
if stats.graph_x_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.graph_x_dimensionality_count} "
"distinct graph x dimensionalities"
)
if stats.graph_y_dimensionality_count > 1:
raise ValueError(
f"Database contains {stats.graph_y_dimensionality_count} "
"distinct graph y dimensionalities"
)
# Check that every graph has data flow attributes, or none of them do.
if not (
stats.data_flow_steps_count == 0
or stats.data_flow_steps_count == stats.graph_count
):
raise ValueError(
f"{stats.graph_count - stats.data_flow_steps_count} of "
f"{stats.graph_count} graphs have no data_flow_steps "
"value"
)
self._graph_tuple_stats = stats
with self.Session() as session:
self._splits = sorted(
set(
[
row.split
for row in session.query(GraphTuple.split)
.filter(GraphTuple.split != None)
.group_by(GraphTuple.split)
]
)
)
self._split_counts = {
split: session.query(sql.func.count(GraphTuple.id))
.filter(GraphTuple.split == split)
.scalar()
for split in self._splits
}
@property
def graph_tuple_stats(self):
"""Fetch aggregate graph tuple stats, or compute them if not set."""
if self._graph_tuple_stats is None:
self.RefreshStats()
return self._graph_tuple_stats
@property
def stats_json(self) -> Dict[str, Any]:
"""Fetch the database statics as a JSON dictionary."""
return {
name: function(self) for name, function in database_statistics_registry
}
def __repr__(self) -> str:
return (
f"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with "
f"dimensionalities: node_x={self.node_x_dimensionality}, "
f"node_y={self.node_y_dimensionality}, "
f"graph_x={self.graph_x_dimensionality}, "
f"graph_y={self.graph_y_dimensionality}."
)
# Deferred declaration of flags because we need to reference Database class.
app.DEFINE_database(
"graph_db", Database, None, "The database to read graph tuples from.",
)
def Main():
"""Main entry point."""
graph_db = FLAGS.graph_db()
print(jsonutil.format_json(graph_db.stats_json))
if __name__ == "__main__":
app.Run(Main)
|
[
"# Copyright 2019-2020 the ProGraML authors.\n#\n# Contact Chris Cummins <[email protected]>.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module defines a database for storing graph tuples.\"\"\"\nimport datetime\nimport pathlib\nimport pickle\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\n\nimport networkx as nx\nimport sqlalchemy as sql\nfrom sqlalchemy.dialects import sqlite\n\nfrom deeplearning.ml4pl import run_id\nfrom deeplearning.ml4pl.graphs import programl_pb2\nfrom deeplearning.ml4pl.graphs.labelled import graph_tuple as graph_tuple_lib\nfrom labm8.py import app\nfrom labm8.py import crypto\nfrom labm8.py import decorators\nfrom labm8.py import humanize\nfrom labm8.py import jsonutil\nfrom labm8.py import progress\nfrom labm8.py import sqlutil\n\n\nFLAGS = app.FLAGS\n# Note we declare a graph_db flag at the bottom of this file, after declaring\n# the Database class.\n\nBase = sql.ext.declarative.declarative_base()\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n\n # Unused integer ID for this row.\n id: int = sql.Column(sql.Integer, primary_key=True)\n\n # The run ID that generated this <key,value> pair.\n run_id: str = run_id.RunId.SqlStringColumn()\n\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n\n # The <key,value> pair.\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(\n sqlutil.ColumnTypes.LargeBinary(), nullable=False\n )\n\n @property\n def value(self) -> Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n\n id: int = sql.Column(sql.Integer, primary_key=True)\n\n # A reference to the 'id' column of a\n # deeplearning.ml4pl.ir.ir_database.IntermediateRepresentationFile database\n # row. 
There is no foreign key relationship here because they are separate\n # databases.\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n\n # An integer used to split databases of graphs into separate graphs, e.g.\n # train/val/test split.\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n\n # The size of the program graph.\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n\n # The maximum value of the 'position' attribute of edges.\n # Although this is an integral value, we store it as a float when using sqlite\n # backend because for an unknown reason, sql.func.max(edge_position_max)\n # returns a byte array when aggregating over sqlite backend.\n edge_position_max: int = sql.Column(\n sql.Integer().with_variant(sqlite.FLOAT(), \"sqlite\"), nullable=False\n )\n\n # The dimensionality of node-level features and labels.\n node_x_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n node_y_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n\n # The dimensionality of graph-level features and labels.\n graph_x_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n graph_y_dimensionality: int = sql.Column(\n sql.Integer, default=0, nullable=False\n )\n\n # The size of the pickled graph tuple in bytes.\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n\n # A copy of attributes from the\n # deeplearning.ml4pl.graphs.labelled.data_flow_graphs.DataFlowAnnotatedGraph\n # tuple for storing metadata of data flow analysis graphs. 
If not relevant ,\n # these columns may be null.\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n\n # Create the one-to-one relationship from GraphTuple to GraphTupleData.\n data: \"GraphTupleData\" = sql.orm.relationship(\n \"GraphTupleData\", uselist=False, cascade=\"all, delete-orphan\"\n )\n\n @property\n def has_data_flow(self) -> bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) -> int:\n return self.control_edge_count + self.data_edge_count + self.call_edge_count\n\n # Joined table accessors:\n\n @property\n def sha1(self) -> str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) -> graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) -> None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, \"wb\") as f:\n pickle.dump(self.tuple, f)\n\n # Factory methods:\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, \"rb\") as f:\n graph_tuple = pickle.load(f)\n\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(\n cls,\n graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int,\n split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(\n ir_id=ir_id,\n split=split,\n node_count=graph_tuple.node_count,\n control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count,\n call_edge_count=graph_tuple.call_edge_count,\n edge_position_max=graph_tuple.edge_position_max,\n node_x_dimensionality=graph_tuple.node_x_dimensionality,\n node_y_dimensionality=graph_tuple.node_y_dimensionality,\n graph_x_dimensionality=graph_tuple.graph_x_dimensionality,\n graph_y_dimensionality=graph_tuple.graph_y_dimensionality,\n pickled_graph_tuple_size=len(pickled_graph_tuple),\n data=GraphTupleData(\n sha1=crypto.sha1(pickled_graph_tuple),\n pickled_graph_tuple=pickled_graph_tuple,\n ),\n )\n\n @classmethod\n def CreateFromNetworkX(\n cls, g: nx.MultiDiGraph, ir_id: int, split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the 
boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split)\n mapped.data_flow_steps = g.graph.get(\"data_flow_steps\")\n mapped.data_flow_root_node = g.graph.get(\"data_flow_root_node\")\n mapped.data_flow_positive_node_count = g.graph.get(\n \"data_flow_positive_node_count\"\n )\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) -> \"GraphTuple\":\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(\n ir_id=ir_id,\n node_count=0,\n control_edge_count=0,\n data_edge_count=0,\n call_edge_count=0,\n edge_position_max=0,\n pickled_graph_tuple_size=0,\n )\n\n @classmethod\n def CreateFromProgramGraph(\n cls,\n program_graph: programl_pb2.ProgramGraph,\n ir_id: int,\n split: Optional[int] = None,\n ) -> \"GraphTuple\":\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph\n )\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (\n program_graph.data_flow_positive_node_count\n )\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n\n id: int = sql.Column(\n sql.Integer,\n sql.ForeignKey(\"graph_tuples.id\", onupdate=\"CASCADE\", ondelete=\"CASCADE\"),\n primary_key=True,\n )\n\n # The sha1sum of the 'pickled_graph_tuple' column. 
There is no requirement\n # that graph tuples be unique, but, should you wish to enforce this,\n # you can group by this sha1 column and prune the duplicates.\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n\n # The pickled GraphTuple data.\n pickled_graph_tuple: bytes = sql.Column(\n sqlutil.ColumnTypes.LargeBinary(), nullable=False\n )\n\n\n# A registry of database statics, where each entry is a <name, property> tuple.\ndatabase_statistics_registry: List[Tuple[str, Callable[[\"Database\"], Any]]] = []\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(\n self,\n url: str,\n must_exist: bool = False,\n ctx: progress.ProgressContext = progress.NullContext,\n ):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n\n # Lazily evaluated attributes.\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n ##############################################################################\n # Database stats. These are evaluated lazily and the results cached. There is\n # no cache invalidation strategy - after modifying the database, you must\n # manually call RefreshStats() to ensure that stale stats are re-computed.\n ##############################################################################\n\n @database_statistic\n def graph_count(self) -> int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) -> int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) -> int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) -> int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) -> int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) -> int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) -> int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) -> int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) -> int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) -> int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) -> int:\n \"\"\"The 
maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) -> int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) -> int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) -> int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) -> int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) -> int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) -> int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) -> int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) -> int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) -> int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) -> float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) -> int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) -> bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) -> int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(\n self.graph_tuple_stats.data_flow_steps_count or 0\n )\n\n @database_statistic\n def data_flow_steps_min(self) -> Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) -> Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) -> Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) -> 
Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) -> Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) -> Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) -> List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) -> Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(\n 2,\n lambda t: (\n \"Computed stats over \"\n f\"{humanize.BinaryPrefix(stats.graph_data_size, 'B')} database \"\n f\"({humanize.Plural(stats.graph_count, 'graph')})\"\n ),\n ), self.Session() as session:\n query = session.query(\n # Graph and IR counts.\n sql.func.count(GraphTuple.id).label(\"graph_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.ir_id)).label(\"ir_count\"),\n sql.func.count(sql.func.distinct(GraphTuple.split)).label(\n \"split_count\"\n ),\n # Node and edge attribute sums.\n sql.func.sum(GraphTuple.node_count).label(\"node_count\"),\n sql.func.sum(GraphTuple.control_edge_count).label(\"control_edge_count\"),\n sql.func.sum(GraphTuple.data_edge_count).label(\"data_edge_count\"),\n sql.func.sum(GraphTuple.call_edge_count).label(\"call_edge_count\"),\n sql.func.sum(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count\"),\n # Node and edge attribute maximums.\n sql.func.max(GraphTuple.node_count).label(\"node_count_max\"),\n sql.func.max(GraphTuple.control_edge_count).label(\n \"control_edge_count_max\"\n ),\n sql.func.max(GraphTuple.data_edge_count).label(\"data_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(GraphTuple.call_edge_count).label(\"call_edge_count_max\"),\n sql.func.max(\n GraphTuple.control_edge_count\n + GraphTuple.data_edge_count\n + GraphTuple.call_edge_count\n ).label(\"edge_count_max\"),\n sql.func.max(GraphTuple.edge_position_max).label(\"edge_position_max\"),\n # Feature and label dimensionality counts. 
Each of these columns\n # should be one, showing that there is a single value for all graph\n # tuples.\n sql.func.count(\n sql.func.distinct(GraphTuple.node_x_dimensionality)\n ).label(\"node_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.node_y_dimensionality)\n ).label(\"node_y_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_x_dimensionality)\n ).label(\"graph_x_dimensionality_count\"),\n sql.func.count(\n sql.func.distinct(GraphTuple.graph_y_dimensionality)\n ).label(\"graph_y_dimensionality_count\"),\n # Feature and label dimensionalities.\n sql.func.max(GraphTuple.node_x_dimensionality).label(\n \"node_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.node_y_dimensionality).label(\n \"node_y_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_x_dimensionality).label(\n \"graph_x_dimensionality\"\n ),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n \"graph_y_dimensionality\"\n ),\n # Graph tuple sizes.\n sql.func.sum(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size\"\n ),\n sql.func.min(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_min\"\n ),\n sql.func.avg(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_avg\"\n ),\n sql.func.max(GraphTuple.pickled_graph_tuple_size).label(\n \"graph_data_size_max\"\n ),\n # Data flow column null counts.\n sql.func.count(GraphTuple.data_flow_steps).label(\n \"data_flow_steps_count\"\n ),\n # Data flow step counts.\n sql.func.min(GraphTuple.data_flow_steps).label(\"data_flow_steps_min\"),\n sql.func.avg(GraphTuple.data_flow_steps).label(\"data_flow_steps_avg\"),\n sql.func.max(GraphTuple.data_flow_steps).label(\"data_flow_steps_max\"),\n # Data flow positive node count.\n sql.func.min(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_min\"\n ),\n sql.func.avg(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_avg\"\n ),\n sql.func.max(GraphTuple.data_flow_positive_node_count).label(\n \"data_flow_positive_node_count_max\"\n ),\n )\n\n # Ignore \"empty\" graphs.\n query = query.filter(GraphTuple.node_count > 1)\n\n # Compute the stats.\n stats = query.one()\n\n # Check that databases have a consistent value for dimensionalities.\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_x_dimensionality_count} \"\n \"distinct node x dimensionalities\"\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.node_y_dimensionality_count} \"\n \"distinct node y dimensionalities\"\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_x_dimensionality_count} \"\n \"distinct graph x dimensionalities\"\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f\"Database contains {stats.graph_y_dimensionality_count} \"\n \"distinct graph y dimensionalities\"\n )\n\n # Check that every graph has data flow attributes, or none of them do.\n if not (\n stats.data_flow_steps_count == 0\n or stats.data_flow_steps_count == stats.graph_count\n ):\n raise ValueError(\n f\"{stats.graph_count - stats.data_flow_steps_count} of \"\n f\"{stats.graph_count} graphs have no data_flow_steps \"\n \"value\"\n )\n\n self._graph_tuple_stats = stats\n\n with self.Session() as session:\n self._splits = sorted(\n set(\n [\n row.split\n for row in session.query(GraphTuple.split)\n .filter(GraphTuple.split != None)\n .group_by(GraphTuple.split)\n ]\n )\n )\n\n 
self._split_counts = {\n split: session.query(sql.func.count(GraphTuple.id))\n .filter(GraphTuple.split == split)\n .scalar()\n for split in self._splits\n }\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) -> Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {\n name: function(self) for name, function in database_statistics_registry\n }\n\n def __repr__(self) -> str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with \"\n f\"dimensionalities: node_x={self.node_x_dimensionality}, \"\n f\"node_y={self.node_y_dimensionality}, \"\n f\"graph_x={self.graph_x_dimensionality}, \"\n f\"graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n# Deferred declaration of flags because we need to reference Database class.\napp.DEFINE_database(\n \"graph_db\", Database, None, \"The database to read graph tuples from.\",\n)\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\nif __name__ == \"__main__\":\n app.Run(Main)\n",
"<docstring token>\nimport datetime\nimport pathlib\nimport pickle\nfrom typing import Any\nfrom typing import Callable\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom typing import Tuple\nimport networkx as nx\nimport sqlalchemy as sql\nfrom sqlalchemy.dialects import sqlite\nfrom deeplearning.ml4pl import run_id\nfrom deeplearning.ml4pl.graphs import programl_pb2\nfrom deeplearning.ml4pl.graphs.labelled import graph_tuple as graph_tuple_lib\nfrom labm8.py import app\nfrom labm8.py import crypto\nfrom labm8.py import decorators\nfrom labm8.py import humanize\nfrom labm8.py import jsonutil\nfrom labm8.py import progress\nfrom labm8.py import sqlutil\nFLAGS = app.FLAGS\nBase = sql.ext.declarative.declarative_base()\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + 
self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is 
signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\ndatabase_statistics_registry: List[Tuple[str, Callable[['Database'], Any]]] = [\n ]\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty 
graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return 
self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\napp.DEFINE_database('graph_db', Database, None,\n 'The database to read graph tuples from.')\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\nif __name__ == '__main__':\n app.Run(Main)\n",
"<docstring token>\n<import token>\nFLAGS = app.FLAGS\nBase = sql.ext.declarative.declarative_base()\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def 
FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\ndatabase_statistics_registry: List[Tuple[str, Callable[['Database'], Any]]] = [\n ]\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return 
int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - 
int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\napp.DEFINE_database('graph_db', Database, None,\n 'The database to read graph tuples from.')\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\nif __name__ == '__main__':\n app.Run(Main)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n 
\"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\ndatabase_statistics_registry: List[Tuple[str, Callable[['Database'], Any]]] = [\n ]\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return 
int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - 
int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\napp.DEFINE_database('graph_db', Database, None,\n 'The database to read graph tuples from.')\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\nif __name__ == '__main__':\n app.Run(Main)\n",
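The step above ends with the `database_statistic` decorator and a `stats_json` property that enumerates every registered statistic. A minimal, self-contained sketch of that registry-decorator pattern follows; the names `statistic`, `Stats`, `row_count` and `row_sum` are illustrative only and are not part of the dataset row.

from typing import Any, Callable, Dict, List, Tuple

# Global registry of (name, getter) pairs, mirroring database_statistics_registry.
statistics_registry: List[Tuple[str, Callable]] = []


def statistic(func: Callable) -> property:
    """Register a method as a named statistic and expose it as a property."""
    statistics_registry.append((func.__name__, func))
    return property(func)


class Stats:
    """Toy container whose registered statistics can be dumped as a dictionary."""

    def __init__(self, rows: List[int]):
        self._rows = rows

    @statistic
    def row_count(self) -> int:
        return len(self._rows)

    @statistic
    def row_sum(self) -> int:
        return sum(self._rows)

    @property
    def as_json(self) -> Dict[str, Any]:
        # Enumerate every registered statistic, as stats_json does above.
        return {name: getter(self) for name, getter in statistics_registry}


print(Stats([1, 2, 3]).as_json)  # {'row_count': 3, 'row_sum': 6}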
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n 
\"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def 
data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n 
def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n\n\ndef Main():\n \"\"\"Main entry point.\"\"\"\n graph_db = FLAGS.graph_db()\n print(jsonutil.format_json(graph_db.stats_json))\n\n\n<code token>\n",
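This step keeps the GraphTuple / GraphTupleData pair, which splits scan-friendly metadata from the pickled blob so aggregate queries never read the binary column. A small sketch of that parent/child layout is shown below; it assumes SQLAlchemy 1.4+ and the table, class and column names (`Record`, `RecordData`, `size`, `payload`) are illustrative, not taken from the row.

import pickle

import sqlalchemy as sql
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()


class Record(Base):
    """Scan-friendly metadata row."""
    __tablename__ = 'records'
    id = sql.Column(sql.Integer, primary_key=True)
    size = sql.Column(sql.Integer, nullable=False)
    # One-to-one join to the blob table; deleting a Record deletes its blob.
    data = relationship('RecordData', uselist=False, cascade='all, delete-orphan')


class RecordData(Base):
    """Binary payload, kept out of the metadata table."""
    __tablename__ = 'record_data'
    id = sql.Column(sql.Integer,
                    sql.ForeignKey('records.id', ondelete='CASCADE'),
                    primary_key=True)
    payload = sql.Column(sql.LargeBinary, nullable=False)


engine = sql.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

blob = pickle.dumps({'nodes': 3})
session.add(Record(size=len(blob), data=RecordData(payload=blob)))
session.commit()
# Metadata scans touch only the small table:
print(session.query(sql.func.sum(Record.size)).scalar())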
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n 
\"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n\n\ndef database_statistic(func):\n \"\"\"A decorator to mark a method on a Database as a database static.\n\n Database statistics can be accessed using Database.stats_json property to\n retrieve a <name, vale> dictionary.\n \"\"\"\n global database_statistics_registry\n database_statistics_registry.append((func.__name__, func))\n return property(func)\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def 
data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n 
def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
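RefreshStats in the steps above issues one large labeled aggregate query and then reads each aggregate back as a named attribute of a single result row. A reduced sketch of that labeled-aggregate idiom follows; the `Graph` table and its columns are illustrative stand-ins, not the dataset's own schema.

import sqlalchemy as sql
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Graph(Base):
    __tablename__ = 'graphs'
    id = sql.Column(sql.Integer, primary_key=True)
    node_count = sql.Column(sql.Integer, nullable=False)
    split = sql.Column(sql.Integer, nullable=True)


engine = sql.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Graph(node_count=2, split=0),
                 Graph(node_count=5, split=1),
                 Graph(node_count=0, split=None)])
session.commit()

# One round trip computes every aggregate; each label becomes a row attribute.
stats = (session.query(
    sql.func.count(Graph.id).label('graph_count'),
    sql.func.sum(Graph.node_count).label('node_count'),
    sql.func.max(Graph.node_count).label('node_count_max'),
    sql.func.count(sql.func.distinct(Graph.split)).label('split_count'))
    .filter(Graph.node_count > 1)  # skip "empty" graphs, as RefreshStats does
    .one())
print(stats.graph_count, stats.node_count, stats.node_count_max, stats.split_count)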
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n \"\"\"A key-value database metadata store, with additional run ID.\"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n 
\"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n 
@database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
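The step above describes a GraphTuple metadata table paired with a GraphTupleData table that holds the pickled blob, so that metadata can be scanned without churning through binary data. Below is a minimal, self-contained sketch of that two-table split; the table and column names (GraphMeta, GraphBlob, graph_meta, graph_blob, pickled_size) are illustrative assumptions, not the project's actual schema, and only plain SQLAlchemy is assumed.

import hashlib
import pickle

import sqlalchemy as sql
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

Base = declarative_base()


class GraphMeta(Base):
    # Cheap-to-scan metadata row (hypothetical stand-in for GraphTuple).
    __tablename__ = "graph_meta"
    id = sql.Column(sql.Integer, primary_key=True)
    node_count = sql.Column(sql.Integer, nullable=False)
    pickled_size = sql.Column(sql.Integer, nullable=False)
    data = relationship("GraphBlob", uselist=False, cascade="all, delete-orphan")


class GraphBlob(Base):
    # Heavy pickled payload row (hypothetical stand-in for GraphTupleData).
    __tablename__ = "graph_blob"
    id = sql.Column(sql.Integer,
                    sql.ForeignKey("graph_meta.id", ondelete="CASCADE"),
                    primary_key=True)
    sha1 = sql.Column(sql.String(40), nullable=False, index=True)
    pickled = sql.Column(sql.LargeBinary, nullable=False)


engine = sql.create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

payload = {"nodes": [0, 1, 2], "edges": [(0, 1), (1, 2)]}
blob = pickle.dumps(payload)
session.add(GraphMeta(
    node_count=len(payload["nodes"]),
    pickled_size=len(blob),
    data=GraphBlob(sha1=hashlib.sha1(blob).hexdigest(), pickled=blob)))
session.commit()

# Metadata queries never touch the blob table; the payload is only
# un-pickled when explicitly requested, mirroring GraphTuple.tuple above.
row = session.query(GraphMeta).one()
print(row.node_count, pickle.loads(row.data.pickled))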
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n\n @classmethod\n def Create(cls, key: str, value: Any):\n \"\"\"Construct a table entry.\"\"\"\n return Meta(key=key, pickled_value=pickle.dumps(value))\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file 
generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n 
@database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
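RefreshStats() in the step above gathers all aggregate statistics in one query of labeled sql.func expressions and then reads them back by label from a single result row, excluding the "empty" sentinel graphs. The following is a minimal sketch of that pattern under assumed names (Graph, graphs); it is not the project's schema, only an illustration of the labeled-aggregate query shape.

import sqlalchemy as sql
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()


class Graph(Base):
    # Hypothetical miniature of the GraphTuple metadata table.
    __tablename__ = "graphs"
    id = sql.Column(sql.Integer, primary_key=True)
    node_count = sql.Column(sql.Integer, nullable=False)
    edge_count = sql.Column(sql.Integer, nullable=False)
    split = sql.Column(sql.Integer, nullable=True)


engine = sql.create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    Graph(node_count=3, edge_count=2, split=0),
    Graph(node_count=5, edge_count=7, split=1),
    Graph(node_count=0, edge_count=0, split=None),  # "empty" sentinel row
])
session.commit()

stats = (
    session.query(
        sql.func.count(Graph.id).label("graph_count"),
        sql.func.sum(Graph.node_count).label("node_count"),
        sql.func.max(Graph.edge_count).label("edge_count_max"),
        sql.func.count(sql.func.distinct(Graph.split)).label("split_count"),
    )
    # Exclude empty sentinel graphs, mirroring the node_count > 1 filter above.
    .filter(Graph.node_count > 1)
    .one()
)
# Each aggregate is read back by its label from the single result row.
print(stats.graph_count, stats.node_count, stats.edge_count_max, stats.split_count)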
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n\n @property\n def value(self) ->Any:\n \"\"\"De-pickle the column value.\"\"\"\n return pickle.loads(self.pickled_value)\n <function token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple 
instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n 
@database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
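The Database class above caches its derived statistics (graph_tuple_stats, splits, split_counts) on the instance and recomputes them only on first access or when RefreshStats() is called. A minimal, self-contained sketch of that lazy-caching pattern follows; StatsCache and its list-of-splits input are hypothetical stand-ins for the database session, not the project's API.

from typing import Dict, List, Optional


class StatsCache:
    def __init__(self, rows: List[Optional[int]]):
        self._rows = rows                       # stand-in for the DB session
        self._splits: Optional[List[int]] = None
        self._split_counts: Optional[Dict[int, int]] = None

    def RefreshStats(self) -> None:
        """Recompute and cache the derived statistics."""
        present = [s for s in self._rows if s is not None]
        self._splits = sorted(set(present))
        self._split_counts = {s: present.count(s) for s in self._splits}

    @property
    def splits(self) -> List[int]:
        # Computed lazily on first access, then memoised until refreshed.
        if self._splits is None:
            self.RefreshStats()
        return self._splits

    @property
    def split_counts(self) -> Dict[int, int]:
        if self._split_counts is None:
            self.RefreshStats()
        return self._split_counts


cache = StatsCache([0, 1, 1, None, 2, 1])
print(cache.splits)        # [0, 1, 2]
print(cache.split_counts)  # {0: 1, 1: 3, 2: 1}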
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Meta(Base, sqlutil.TablenameFromClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n run_id: str = run_id.RunId.SqlStringColumn()\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n key: str = sql.Column(sql.String(128), index=True)\n pickled_value: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary(),\n nullable=False)\n <function token>\n <function token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return 
cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n 
@database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
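The Database class preserved in the step above exposes its aggregate statistics as cached properties backed by a single RefreshStats() query. A hypothetical usage sketch follows; the import path is an assumption (it is not visible in this step), and only the constructor, RefreshStats, the statistic properties, and stats_json are taken from the code above.

# Hedged usage sketch: the module name below is assumed, not shown in this step.
import graph_tuple_database  # assumed import path for the module above

db = graph_tuple_database.Database("sqlite:////tmp/graph_tuples.db")
db.RefreshStats()                      # one aggregate query over non-empty graphs
print(db.graph_count, db.node_count)   # cached after the first computation
print(db.stats_json)                   # every registered statistic as a dict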
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"A table of graph tuples.\n\n For every GraphTuple, there should be a corresponding GraphTupleData row\n containing the pickled graph tuple as a binary blob. The reason for dividing\n the data horizontally across two tables is to enable fast scanning\n of graph metadata, without needing to churn through a table of pickled binary\n blobs.\n \"\"\"\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between 
the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
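The GraphTuple docstring kept in this step explains the horizontal split: lightweight metadata lives in one table, while the pickled blob sits in a companion table keyed by the same id, so metadata scans never read the blobs. Below is a minimal, self-contained SQLAlchemy sketch of that layout; the table and column names are illustrative only, not the project's schema.

# Illustrative two-table metadata/blob split in the spirit of
# GraphTuple / GraphTupleData above. Names are made up for the sketch.
import pickle

import sqlalchemy as sql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()


class GraphMeta(Base):
    __tablename__ = "graph_metas"
    id = sql.Column(sql.Integer, primary_key=True)
    node_count = sql.Column(sql.Integer, nullable=False)
    # One-to-one link to the bulky pickled payload, deleted with its parent.
    data = relationship("GraphBlob", uselist=False, cascade="all, delete-orphan")


class GraphBlob(Base):
    __tablename__ = "graph_blobs"
    # Shares its primary key with the parent row, mirroring GraphTupleData.
    id = sql.Column(sql.Integer,
                    sql.ForeignKey("graph_metas.id", ondelete="CASCADE"),
                    primary_key=True)
    pickled = sql.Column(sql.LargeBinary, nullable=False)


engine = sql.create_engine("sqlite://")
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(GraphMeta(node_count=3,
                      data=GraphBlob(pickled=pickle.dumps([0, 1, 2]))))
session.commit()

# Metadata aggregation touches only graph_metas, never the blob table.
print(session.query(sql.func.sum(GraphMeta.node_count)).scalar())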
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n\n @classmethod\n def FromFile(cls, path: pathlib.Path, ir_id: int):\n \"\"\"Construct a mapped database instance from a file generated by ToFile().\n\n Args:\n path: The path of the file to read.\n ir_id: The IR id of the graph tuple.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n with open(path, 'rb') as f:\n graph_tuple = pickle.load(f)\n return cls.CreateFromGraphTuple(graph_tuple, ir_id)\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, 
node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
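In these steps the <function token> placeholder between GraphTupleData and Database elides the database_statistic decorator that populates database_statistics_registry, the registry that stats_json iterates over. One plausible minimal reading, assuming the decorator only records the method under its name and exposes it as a read-only property, is sketched below; the real implementation is not shown in this section.

# Assumed implementation sketch; the actual decorator is elided above.
from typing import Any, Callable, List, Tuple

database_statistics_registry: List[Tuple[str, Callable[..., Any]]] = []


def database_statistic(func: Callable[..., Any]) -> property:
    """Record a statistic so stats_json can enumerate it; expose it read-only."""
    database_statistics_registry.append((func.__name__, func))
    return property(func)

Under this reading each decorated method doubles as a normal property on Database and as an entry that stats_json can call with the instance to build its dictionary.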
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n\n @property\n def edge_count(self) ->int:\n return (self.control_edge_count + self.data_edge_count + self.\n call_edge_count)\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n 
graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n\n @classmethod\n def CreateFromGraphTuple(cls, graph_tuple: graph_tuple_lib.GraphTuple,\n ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given graph tuple.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n graph_tuple: The graph tuple to map.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n pickled_graph_tuple = pickle.dumps(graph_tuple)\n return GraphTuple(ir_id=ir_id, split=split, node_count=graph_tuple.\n node_count, control_edge_count=graph_tuple.control_edge_count,\n data_edge_count=graph_tuple.data_edge_count, call_edge_count=\n graph_tuple.call_edge_count, edge_position_max=graph_tuple.\n edge_position_max, node_x_dimensionality=graph_tuple.\n node_x_dimensionality, node_y_dimensionality=graph_tuple.\n node_y_dimensionality, graph_x_dimensionality=graph_tuple.\n graph_x_dimensionality, graph_y_dimensionality=graph_tuple.\n graph_y_dimensionality, 
pickled_graph_tuple_size=len(\n pickled_graph_tuple), data=GraphTupleData(sha1=crypto.sha1(\n pickled_graph_tuple), pickled_graph_tuple=pickled_graph_tuple))\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n\n @classmethod\n def CreateEmpty(cls, ir_id: int) ->'GraphTuple':\n \"\"\"Create an \"empty\" graph tuple.\n\n An empty graph tuple can be used to signal that the conversion to GraphTuple\n failed, and is signalled by a node_count of 0. 
An empty graph tuple has\n no corresponding GraphTupleData row.\n \"\"\"\n return GraphTuple(ir_id=ir_id, node_count=0, control_edge_count=0,\n data_edge_count=0, call_edge_count=0, edge_position_max=0,\n pickled_graph_tuple_size=0)\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n 
@database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n 
distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n\n @decorators.memoized_property\n def tuple(self) ->graph_tuple_lib.GraphTuple:\n \"\"\"Un-pickle the graph tuple and cache the binary results.\"\"\"\n return pickle.loads(self.data.pickled_graph_tuple)\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n 
This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n 
return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty 
graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n 
pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
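The statistics above depend on a `database_statistic` decorator and a module-level `database_statistics_registry` that `stats_json` iterates as (name, function) pairs. The helper itself is not shown in this excerpt, so the following is only a minimal sketch consistent with how the code uses it (statistics read as properties, registry enumerated by `stats_json`); the exact original implementation may differ.

from typing import Any, Callable, List, Tuple

# Assumed shape of the registry consumed by Database.stats_json:
# a list of (statistic name, unbound function) pairs.
database_statistics_registry: List[Tuple[str, Callable[..., Any]]] = []

def database_statistic(func: Callable[..., Any]) -> property:
    """Register a method as a named database statistic (sketch, not the original).

    The function is recorded in the registry so that stats_json can enumerate
    it, and wrapped in a property so callers read db.graph_count rather than
    calling db.graph_count().
    """
    database_statistics_registry.append((func.__name__, func))
    return property(func)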
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n <function token>\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n\n @classmethod\n def CreateFromProgramGraph(cls, program_graph: programl_pb2.\n ProgramGraph, ir_id: int, split: Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given annotated graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two 
data/metadata tables invisibly.\n\n Args:\n annotated_graph: A DataFlowAnnotatedGraph instance.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromProgramGraph(\n program_graph)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id, split)\n mapped.data_flow_steps = program_graph.data_flow_steps\n mapped.data_flow_root_node = program_graph.data_flow_root_node\n mapped.data_flow_positive_node_count = (program_graph.\n data_flow_positive_node_count)\n return mapped\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty 
graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) 
->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), 
sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
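RefreshStats() builds one large aggregate query: each sql.func aggregate is given a .label() so the resulting row exposes that value as an attribute, and the .filter(GraphTuple.node_count > 1) clause restricts every statistic to non-empty graphs. A condensed sketch of the same pattern, with the session and GraphTuple model passed in explicitly to stand in for self.Session() and the mapped class above:

import sqlalchemy as sql

def compute_basic_stats(session, GraphTuple):
    # Each aggregate gets a label; query.one() returns a single row whose
    # attributes (.graph_count, .node_count, ...) match those labels.
    query = session.query(
        sql.func.count(GraphTuple.id).label("graph_count"),
        sql.func.sum(GraphTuple.node_count).label("node_count"),
        sql.func.max(GraphTuple.node_count).label("node_count_max"),
        sql.func.sum(
            GraphTuple.control_edge_count
            + GraphTuple.data_edge_count
            + GraphTuple.call_edge_count
        ).label("edge_count"),
    ).filter(GraphTuple.node_count > 1)  # restrict to non-empty graphs
    return query.one()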
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n <function token>\n\n def ToFile(self, path: pathlib.Path) ->None:\n \"\"\"Dump the pickled graph tuple to file.\n\n This is lossy, as the ir_id column is not dumped.\n\n Args:\n path: The path of the graph tuple to write.\n \"\"\"\n with open(path, 'wb') as f:\n pickle.dump(self.tuple, f)\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n <function token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
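After the aggregate row is cached, the split metadata is computed in a second session: the distinct non-NULL split values are collected and sorted, then a per-split COUNT query fills the split-to-graph-count mapping. A standalone sketch of that step, with session and GraphTuple passed in for illustration:

import sqlalchemy as sql

def compute_split_counts(session, GraphTuple):
    # Distinct non-NULL split values, sorted ascending.
    splits = sorted(
        {row.split
         for row in session.query(GraphTuple.split)
                           .filter(GraphTuple.split != None)  # noqa: E711
                           .group_by(GraphTuple.split)}
    )
    # Number of graphs in each split.
    split_counts = {
        split: session.query(sql.func.count(GraphTuple.id))
                      .filter(GraphTuple.split == split)
                      .scalar()
        for split in splits
    }
    return splits, split_counts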
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n\n @property\n def has_data_flow(self) ->bool:\n \"\"\"Returns whether graph tuple has data flow columns.\"\"\"\n return self.data_flow_steps is not None\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n <function token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
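Taken together, the Database class computes its statistics lazily: the first access to any @database_statistic property (or an explicit RefreshStats() call) runs the aggregate query and caches the result for subsequent reads. A short, hypothetical usage example; the database URL and its contents are assumptions for illustration only:

# Open (or create) a graph tuple database; the URL is illustrative.
db = Database("sqlite:////tmp/graph_tuples.db", must_exist=False)

# The first property access triggers RefreshStats() and caches the aggregates.
print(db.graph_count, db.node_count, db.has_data_flow)

# stats_json evaluates every registered statistic and returns a dict.
print(db.stats_json)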
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n <function token>\n <function token>\n\n @property\n def sha1(self) ->str:\n \"\"\"Return the sha1 of the graph tuple.\"\"\"\n return self.data.sha1\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n <function token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @classmethod\n def CreateFromNetworkX(cls, g: nx.MultiDiGraph, ir_id: int, split:\n Optional[int]=None) ->'GraphTuple':\n \"\"\"Create a mapped database instance from the given networkx graph.\n\n This is the preferred method of populating databases of graph tuples, as\n it contains the boilerplate to extract and set the metadata columns, and\n handles the join between the two data/metadata tables invisibly.\n\n Args:\n g: The networkx graph.\n ir_id: The intermediate representation ID.\n split: The split value of this graph.\n\n Returns:\n A GraphTuple instance.\n \"\"\"\n graph_tuple = graph_tuple_lib.GraphTuple.CreateFromNetworkX(g)\n mapped = cls.CreateFromGraphTuple(graph_tuple, ir_id=ir_id, split=split\n )\n mapped.data_flow_steps = g.graph.get('data_flow_steps')\n mapped.data_flow_root_node = g.graph.get('data_flow_root_node')\n mapped.data_flow_positive_node_count = g.graph.get(\n 'data_flow_positive_node_count')\n return mapped\n <function token>\n <function token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass GraphTuple(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, primary_key=True)\n ir_id: int = sql.Column(sql.Integer, nullable=False, index=True)\n split: Optional[int] = sql.Column(sql.Integer, nullable=True, index=True)\n node_count: int = sql.Column(sql.Integer, nullable=False)\n control_edge_count: int = sql.Column(sql.Integer, nullable=False)\n data_edge_count: int = sql.Column(sql.Integer, nullable=False)\n call_edge_count: int = sql.Column(sql.Integer, nullable=False)\n edge_position_max: int = sql.Column(sql.Integer().with_variant(sqlite.\n FLOAT(), 'sqlite'), nullable=False)\n node_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n node_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_x_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n graph_y_dimensionality: int = sql.Column(sql.Integer, default=0,\n nullable=False)\n pickled_graph_tuple_size: int = sql.Column(sql.Integer, nullable=False)\n data_flow_steps: int = sql.Column(sql.Integer, nullable=True)\n data_flow_root_node: int = sql.Column(sql.Integer, nullable=True)\n data_flow_positive_node_count: int = sql.Column(sql.Integer, nullable=True)\n timestamp: datetime.datetime = sqlutil.ColumnFactory.MillisecondDatetime()\n data: 'GraphTupleData' = sql.orm.relationship('GraphTupleData', uselist\n =False, cascade='all, delete-orphan')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. 
See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return 
a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = 
query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n \"\"\"The pickled graph tuple data. See GraphTuple for the parent table.\"\"\"\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x 
dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n 
\"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n 
GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass GraphTupleData(Base, sqlutil.PluralTablenameFromCamelCapsClassNameMixin):\n <docstring token>\n id: int = sql.Column(sql.Integer, sql.ForeignKey('graph_tuples.id',\n onupdate='CASCADE', ondelete='CASCADE'), primary_key=True)\n sha1: str = sql.Column(sql.String(40), nullable=False, index=True)\n pickled_graph_tuple: bytes = sql.Column(sqlutil.ColumnTypes.LargeBinary\n (), nullable=False)\n\n\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return 
int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if 
self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 
'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n \"\"\"A database of GraphTuples.\"\"\"\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return 
int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n 
Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count 
> 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n\n @database_statistic\n def data_edge_count(self) ->int:\n \"\"\"The total data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count or 0)\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return 
int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n 
Raises:\n ValueError: If the database contains invalid entries, e.g. inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count 
> 1:\n raise ValueError(\n f'Database contains {stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty 
graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n\n @database_statistic\n def data_flow_positive_node_count_min(self) ->Optional[int]:\n \"\"\"The minimum data flow positive node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_min or 0)\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n\n @database_statistic\n def node_count(self) ->int:\n \"\"\"The total node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count or 0)\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty 
graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total 
size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n\n def __repr__(self) ->str:\n return (\n f\"Database of {humanize.DecimalPrefix(self.graph_count, 'graph')} with dimensionalities: node_x={self.node_x_dimensionality}, node_y={self.node_y_dimensionality}, graph_x={self.graph_x_dimensionality}, graph_y={self.graph_y_dimensionality}.\"\n )\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total 
size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n\n @database_statistic\n def split_counts(self) ->Dict[int, int]:\n \"\"\"Return a dictionary mapping split to the number of graphs.\"\"\"\n if self._split_counts is None:\n self.RefreshStats()\n return self._split_counts\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n\n @database_statistic\n def data_edge_count_max(self) ->int:\n \"\"\"The maximum data edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.data_edge_count_max or 0)\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total 
size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n\n @database_statistic\n def node_count_max(self) ->int:\n \"\"\"The maximum node count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_count_max or 0)\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) 
->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n 
@database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n\n @database_statistic\n def data_flow_steps_min(self) ->Optional[int]:\n \"\"\"The minimum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_min or 0)\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n\n @database_statistic\n def graph_data_size(self) ->int:\n \"\"\"The total size of the non-empty graph data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size or 0)\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n 
@database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
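The RefreshStats body in the step above builds one large SELECT in which every aggregate expression gets a .label(), so the resulting row exposes each statistic as an attribute. A self-contained illustration of that pattern is sketched below, assuming SQLAlchemy's 1.x Query API (which the snippets appear to use) and a toy table named Thing that is not part of the original code.

import sqlalchemy as sql
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Thing(Base):
    __tablename__ = "things"
    id = sql.Column(sql.Integer, primary_key=True)
    size = sql.Column(sql.Integer, nullable=False)


engine = sql.create_engine("sqlite://")  # in-memory database for the example
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([Thing(size=3), Thing(size=1), Thing(size=8)])
session.commit()

# One SELECT computes several statistics; each label becomes a row attribute.
stats = session.query(
    sql.func.count(Thing.id).label("thing_count"),
    sql.func.min(Thing.size).label("size_min"),
    sql.func.avg(Thing.size).label("size_avg"),
    sql.func.max(Thing.size).label("size_max"),
).filter(Thing.size > 1).one()  # aggregates without GROUP BY return exactly one row

print(stats.thing_count, stats.size_min, stats.size_avg, stats.size_max)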
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return 
float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n\n def RefreshStats(self):\n \"\"\"Compute the database stats for access via the instance properties.\n\n Raises:\n ValueError: If the database contains invalid entries, e.g. 
inconsistent\n vector dimensionalities.\n \"\"\"\n with self.ctx.Profile(2, lambda t:\n f\"Computed stats over {humanize.BinaryPrefix(stats.graph_data_size, 'B')} database ({humanize.Plural(stats.graph_count, 'graph')})\"\n ), self.Session() as session:\n query = session.query(sql.func.count(GraphTuple.id).label(\n 'graph_count'), sql.func.count(sql.func.distinct(GraphTuple\n .ir_id)).label('ir_count'), sql.func.count(sql.func.\n distinct(GraphTuple.split)).label('split_count'), sql.func.\n sum(GraphTuple.node_count).label('node_count'), sql.func.\n sum(GraphTuple.control_edge_count).label(\n 'control_edge_count'), sql.func.sum(GraphTuple.\n data_edge_count).label('data_edge_count'), sql.func.sum(\n GraphTuple.call_edge_count).label('call_edge_count'), sql.\n func.sum(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count'), sql.func.max(GraphTuple.node_count).label(\n 'node_count_max'), sql.func.max(GraphTuple.\n control_edge_count).label('control_edge_count_max'), sql.\n func.max(GraphTuple.data_edge_count).label(\n 'data_edge_count_max'), sql.func.max(GraphTuple.\n call_edge_count).label('call_edge_count_max'), sql.func.max\n (GraphTuple.call_edge_count).label('call_edge_count_max'),\n sql.func.max(GraphTuple.control_edge_count + GraphTuple.\n data_edge_count + GraphTuple.call_edge_count).label(\n 'edge_count_max'), sql.func.max(GraphTuple.\n edge_position_max).label('edge_position_max'), sql.func.\n count(sql.func.distinct(GraphTuple.node_x_dimensionality)).\n label('node_x_dimensionality_count'), sql.func.count(sql.\n func.distinct(GraphTuple.node_y_dimensionality)).label(\n 'node_y_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_x_dimensionality)).label(\n 'graph_x_dimensionality_count'), sql.func.count(sql.func.\n distinct(GraphTuple.graph_y_dimensionality)).label(\n 'graph_y_dimensionality_count'), sql.func.max(GraphTuple.\n node_x_dimensionality).label('node_x_dimensionality'), sql.\n func.max(GraphTuple.node_y_dimensionality).label(\n 'node_y_dimensionality'), sql.func.max(GraphTuple.\n graph_x_dimensionality).label('graph_x_dimensionality'),\n sql.func.max(GraphTuple.graph_y_dimensionality).label(\n 'graph_y_dimensionality'), sql.func.sum(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size'), sql.\n func.min(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_min'), sql.func.avg(GraphTuple.\n pickled_graph_tuple_size).label('graph_data_size_avg'), sql\n .func.max(GraphTuple.pickled_graph_tuple_size).label(\n 'graph_data_size_max'), sql.func.count(GraphTuple.\n data_flow_steps).label('data_flow_steps_count'), sql.func.\n min(GraphTuple.data_flow_steps).label('data_flow_steps_min'\n ), sql.func.avg(GraphTuple.data_flow_steps).label(\n 'data_flow_steps_avg'), sql.func.max(GraphTuple.\n data_flow_steps).label('data_flow_steps_max'), sql.func.min\n (GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_min'), sql.func.avg(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_avg'), sql.func.max(\n GraphTuple.data_flow_positive_node_count).label(\n 'data_flow_positive_node_count_max'))\n query = query.filter(GraphTuple.node_count > 1)\n stats = query.one()\n if stats.node_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.node_x_dimensionality_count} distinct node x dimensionalities'\n )\n if stats.node_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains 
{stats.node_y_dimensionality_count} distinct node y dimensionalities'\n )\n if stats.graph_x_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_x_dimensionality_count} distinct graph x dimensionalities'\n )\n if stats.graph_y_dimensionality_count > 1:\n raise ValueError(\n f'Database contains {stats.graph_y_dimensionality_count} distinct graph y dimensionalities'\n )\n if not (stats.data_flow_steps_count == 0 or stats.\n data_flow_steps_count == stats.graph_count):\n raise ValueError(\n f'{stats.graph_count - stats.data_flow_steps_count} of {stats.graph_count} graphs have no data_flow_steps value'\n )\n self._graph_tuple_stats = stats\n with self.Session() as session:\n self._splits = sorted(set([row.split for row in session.\n query(GraphTuple.split).filter(GraphTuple.split != None\n ).group_by(GraphTuple.split)]))\n self._split_counts = {split: session.query(sql.func.count(\n GraphTuple.id)).filter(GraphTuple.split == split).\n scalar() for split in self._splits}\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return 
float(self.graph_tuple_stats.graph_data_size_avg or 0)\n\n @database_statistic\n def graph_data_size_max(self) ->int:\n \"\"\"The maximum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_max or 0)\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n\n @database_statistic\n def edge_count(self) ->int:\n \"\"\"The total edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count or 0)\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return 
float(self.graph_tuple_stats.graph_data_size_avg or 0)\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n\n @database_statistic\n def graph_data_size_avg(self) ->float:\n \"\"\"The average size of the non-empty graph tuple data, in bytes.\"\"\"\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database 
has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n\n @database_statistic\n def edge_count_max(self) ->int:\n \"\"\"The maximum edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_count_max or 0)\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n 
def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n 
\"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n\n @property\n def stats_json(self) ->Dict[str, Any]:\n \"\"\"Fetch the database statics as a JSON dictionary.\"\"\"\n return {name: function(self) for name, function in\n database_statistics_registry}\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count(self) ->int:\n \"\"\"The total control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count or 0)\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n 
\"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) 
->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_avg(self) ->Optional[int]:\n \"\"\"The minimum data flow average node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_avg or 0)\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n\n @database_statistic\n def graph_x_dimensionality(self) ->int:\n \"\"\"The graph x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_x_dimensionality or 0)\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) 
->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_data_size_min(self) ->int:\n \"\"\"The minimum size of the non-empty graph tuple data, in bytes.\"\"\"\n return int(self.graph_tuple_stats.graph_data_size_min or 0)\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n 
@database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n\n @database_statistic\n def control_edge_count_max(self) ->int:\n \"\"\"The maximum control edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.control_edge_count_max or 0)\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return 
int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n\n @database_statistic\n def node_y_dimensionality(self) ->int:\n \"\"\"The node y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_y_dimensionality or 0)\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow 
max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n <function token>\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n\n @database_statistic\n def data_flow_steps_max(self) ->Optional[int]:\n \"\"\"The maximum data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.data_flow_steps_max or 0)\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n 
def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n <function token>\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n\n @database_statistic\n def splits(self) ->List[int]:\n \"\"\"Return a list of unique split values.\"\"\"\n if self._splits is None:\n self.RefreshStats()\n return self._splits\n <function token>\n <function token>\n\n @property\n 
def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n <function token>\n <function token>\n\n @database_statistic\n def graph_y_dimensionality(self) ->int:\n \"\"\"The graph y dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.graph_y_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n 
self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n\n def __init__(self, url: str, must_exist: bool=False, ctx: progress.\n ProgressContext=progress.NullContext):\n super(Database, self).__init__(url, Base, must_exist=must_exist)\n self.ctx = ctx\n self._graph_tuple_stats = None\n self._splits = None\n self._split_counts = None\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n\n @database_statistic\n def node_x_dimensionality(self) ->int:\n \"\"\"The node x dimensionality of all non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.node_x_dimensionality or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count(self) ->int:\n \"\"\"The total call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n\n @database_statistic\n def split_count(self) ->int:\n \"\"\"The number of distinct splits in the database.\"\"\"\n return int(self.graph_tuple_stats.split_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n\n @database_statistic\n def edge_position_max(self) ->int:\n \"\"\"The maximum edge position in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.edge_position_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_positive_node_count_max(self) ->Optional[int]:\n \"\"\"The minimum data flow max node count for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return int(self.graph_tuple_stats.\n data_flow_positive_node_count_max or 0)\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n\n @database_statistic\n def data_flow_null_count(self) ->int:\n \"\"\"The number of database rows without data flow information.\n\n If > 0, then has_data_flow is False.\n \"\"\"\n return self.graph_count - int(self.graph_tuple_stats.\n data_flow_steps_count or 0)\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n\n @database_statistic\n def graph_count(self) ->int:\n \"\"\"The number of non-empty graphs in the database.\"\"\"\n return int(self.graph_tuple_stats.graph_count)\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n\n @database_statistic\n def ir_count(self) ->int:\n \"\"\"The number of distinct intermediate representations that the non-empty\n graphs are constructed from.\n \"\"\"\n return int(self.graph_tuple_stats.ir_count or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n <function token>\n <function token>\n\n @database_statistic\n def data_flow_steps_avg(self) ->Optional[float]:\n \"\"\"The average data flow steps for non-empty graphs.\"\"\"\n if self.has_data_flow:\n return float(self.graph_tuple_stats.data_flow_steps_avg)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @property\n def graph_tuple_stats(self):\n \"\"\"Fetch aggregate graph tuple stats, or compute them if not set.\"\"\"\n if self._graph_tuple_stats is None:\n self.RefreshStats()\n return self._graph_tuple_stats\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def has_data_flow(self) ->bool:\n \"\"\"Return whether the graph database has data flow annotations.\n\n This is only true if *all* columns have data flow values.\n \"\"\"\n return self.graph_count and not self.data_flow_null_count\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n @database_statistic\n def call_edge_count_max(self) ->int:\n \"\"\"The maximum call edge count in non-empty graphs.\"\"\"\n return int(self.graph_tuple_stats.call_edge_count_max or 0)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n\n\nclass Database(sqlutil.Database):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<code token>\n<function token>\n<class token>\n<code token>\n<function token>\n<code token>\n"
] | false |
582 |
be1bfa3e366d715d32613284924cf79abde06d41
|
# -*- coding: utf-8 -*-
"""
-----------------------------------------
IDEA Name : PyCharm
Project Name : HelloWorld
-----------------------------------------
File Name : task_worker
Description :
Author : Edwin
Date : 2018/1/4 23:38
-----------------------------------------
Changer : Edwin
Date : 2018/1/4 23:38
Description :
-----------------------------------------
"""
__author__ = 'Edwin'
import queue
import time
from multiprocessing.managers import BaseManager
# Create a QueueManager similar to the master's:
class QueueManager(BaseManager):
pass
def start_request():
    # Since this QueueManager only obtains the Queues over the network, register them by name only:
QueueManager.register('get_task_queue')
QueueManager.register('get_result_queue')
    # Connect to the server, i.e. the machine running task_master.py:
server_add = '127.0.0.1'
print('Connect to server %s...' % server_add)
    # Keep the port and authkey exactly the same as configured in task_master.py:
manager = QueueManager(address=(server_add, 5000), authkey=b'abc')
    # Connect over the network:
manager.connect()
    # Get the Queue objects:
task = manager.get_task_queue()
result = manager.get_result_queue()
    # Fetch tasks from the task queue and write the results to the result queue:
for i in range(10):
try:
n = task.get(timeout=1)
print('run task %d * %d...' % (n, n))
r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))
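            # Note: n is incremented before squaring, so the result string reports
            # (n + 1) * (n + 1) rather than the n * n announced by the print above.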
time.sleep(5)
result.put(r)
except queue.Empty:
print('task queue is empty!')
    # Done processing results
print('worker exit..')
if __name__ == '__main__':
start_request()
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------\n IDEA Name : PyCharm \n Project Name : HelloWorld\n-----------------------------------------\n File Name : task_worker\n Description :\n Author : Edwin\n Date : 2018/1/4 23:38\n-----------------------------------------\n Changer : Edwin\n Date : 2018/1/4 23:38\n Description : \n-----------------------------------------\n\"\"\"\n__author__ = 'Edwin'\n\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\n# 创建类似的QueueManager:\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n # 由于这个QueueManager只从网络上获取Queue,所以注册时只提供名字:\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n\n # 连接到服务器,也就是运行task_master.py的机器:\n server_add = '127.0.0.1'\n\n print('Connect to server %s...' % server_add)\n # 端口和验证码注意保持与task_master.py设置的完全一致:\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n # 从网络连接:\n manager.connect()\n # 获取Queue的对象:\n\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n\n # 从task队列取任务,并把结果写入result队列:\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n # 处理结果\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"<docstring token>\n__author__ = 'Edwin'\nimport queue\nimport time\nfrom multiprocessing.managers import BaseManager\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"<docstring token>\n__author__ = 'Edwin'\n<import token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\nif __name__ == '__main__':\n start_request()\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\ndef start_request():\n QueueManager.register('get_task_queue')\n QueueManager.register('get_result_queue')\n server_add = '127.0.0.1'\n print('Connect to server %s...' % server_add)\n manager = QueueManager(address=(server_add, 5000), authkey=b'abc')\n manager.connect()\n task = manager.get_task_queue()\n result = manager.get_result_queue()\n for i in range(10):\n try:\n n = task.get(timeout=1)\n print('run task %d * %d...' % (n, n))\n r = '%d * %d = %d' % (n + 1, n + 1, (n + 1) * (n + 1))\n time.sleep(5)\n result.put(r)\n except queue.Empty:\n print('task queue is empty!')\n print('worker exit..')\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass QueueManager(BaseManager):\n pass\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
583 |
63d9a0fa0d0747762e65f6f1e85e53090035454c
|
from mongoengine import Document, StringField, BooleanField, ListField, Q
import exceptions
class Category(Document):
id = StringField(primary_key=True)
name = StringField()
is_base_expenses = BooleanField(default=False)
aliases = ListField(StringField())
@classmethod
def get_category_by_text(cls, category_text: str) -> 'Category':
try:
category = cls.objects.get(
Q(name=category_text) |
Q(aliases=category_text.lower())
)
except Category.DoesNotExist:
raise exceptions.InvalidCategory(
                'No category found with that name or alias'
)
return category
|
[
"from mongoengine import Document, StringField, BooleanField, ListField, Q\n\nimport exceptions\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) -> 'Category':\n try:\n category = cls.objects.get(\n Q(name=category_text) |\n Q(aliases=category_text.lower())\n )\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам'\n )\n\n return category\n",
"from mongoengine import Document, StringField, BooleanField, ListField, Q\nimport exceptions\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"<import token>\n\n\nclass Category(Document):\n id = StringField(primary_key=True)\n name = StringField()\n is_base_expenses = BooleanField(default=False)\n aliases = ListField(StringField())\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"<import token>\n\n\nclass Category(Document):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n @classmethod\n def get_category_by_text(cls, category_text: str) ->'Category':\n try:\n category = cls.objects.get(Q(name=category_text) | Q(aliases=\n category_text.lower()))\n except Category.DoesNotExist:\n raise exceptions.InvalidCategory(\n 'Нет такой категории по имени или алиасам')\n return category\n",
"<import token>\n\n\nclass Category(Document):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
584 |
7163be250ae3a22931de037cb6896c2e6d5f00a8
|
'''
    Find the highest value, the fastest one, the slowest one:
    that is what optimization algorithms are for.
    For that we need to think of a function we want to maximize or minimize.
    They are applied above all by companies like Despegar, where they can create real value.
    Companies turn to optimization.
    #############################################
    Traveling Salesman
    What is the most efficient route to visit all the cities?
    Solving the traveling salesman algorithm
    Turing Prize
'''
|
[
"'''\n Encontrar el valor mas alto el mas rapido, el mas lento\n para eso son los algoritmos de optimizacion\n Para eso debemos pensar en una funcion que queramos maximizar o minimizar\n Se aplican mas que todo para empresas como despegar, en donde se pueden generar buenas empresas\n Empresas a la optimizacion \n #############################################33\n Traveling Sales Man\n Cual es la ruta mas eficiente para recorrer todas las ciudades\n Resolver el algoritmo de sales man \n Turing Prize\n'''\n",
"<docstring token>\n"
] | false |
585 |
b5275fc068526063fd8baf13210052971b05503f
|
from matasano import *
ec = EC_M(233970423115425145524320034830162017933,534,1,4,order=233970423115425145498902418297807005944)
assert(ec.scale(4,ec.order) == 0)
aPriv = randint(1,ec.order-1)
aPub = ec.scale(4,aPriv)
print("Factoring...")
twist_ord = 2*ec.prime+2 - ec.order
factors = []
x = twist_ord
for i in range(2,2**24):
if x%i == 0:
if x%(i*i) != 0:
factors.append(i)
x = pp(x,i)
print("Getting remainders...")
rems = []
for f in factors:
u = 0
while u == 0:
while isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):
u = randint(1,ec.prime-1)
u = ec.scale(u,pp(twist_ord,f))
while ec.scale(u,f) != 0:
u = ec.scale(u,f)
shared = ec.scale(u,aPriv) #Not generating the MAC this time
for i in range(f):
if ec.scale(u,i) == shared:
print("\tSolved mod %d"%f)
rems.append(i)
break
#Now aPriv is +-rems[i] mod factors[i]
#Do them 2 at a time to get down to 2 values mod Prod factors[i]
print("Correcting parities...")
for i in range(len(factors)):
if rems[i] != 0:
break
fixed = i
for i in range(len(factors)):
if i == fixed:
continue
u = 0
while u == 0:
while isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):
u = randint(1,ec.prime-1)
u = ec.scale(u,pp(pp(twist_ord,factors[fixed]),factors[i]))
if ec.scale(u,factors[fixed]) == 0:
u = 0
elif ec.scale(u,factors[i]) == 0:
u = 0
shared = ec.scale(u,aPriv)
r,_ = crt([rems[fixed],rems[i]],[factors[fixed],factors[i]])
if ec.scale(u,r) != shared:
rems[i] = (-rems[i])%factors[i]
#Now I need to run down the remaining bits
|
[
"from matasano import *\r\n\r\nec = EC_M(233970423115425145524320034830162017933,534,1,4,order=233970423115425145498902418297807005944)\r\nassert(ec.scale(4,ec.order) == 0)\r\n\r\naPriv = randint(1,ec.order-1)\r\naPub = ec.scale(4,aPriv)\r\n\r\nprint(\"Factoring...\")\r\ntwist_ord = 2*ec.prime+2 - ec.order\r\nfactors = []\r\nx = twist_ord\r\nfor i in range(2,2**24):\r\n\tif x%i == 0:\r\n\t\tif x%(i*i) != 0:\r\n\t\t\tfactors.append(i)\r\n\t\tx = pp(x,i)\r\n\t\t\r\nprint(\"Getting remainders...\")\r\nrems = []\r\nfor f in factors:\r\n\tu = 0\r\n\twhile u == 0:\r\n\t\twhile isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):\r\n\t\t\tu = randint(1,ec.prime-1)\r\n\t\tu = ec.scale(u,pp(twist_ord,f))\r\n\twhile ec.scale(u,f) != 0:\r\n\t\tu = ec.scale(u,f)\r\n\tshared = ec.scale(u,aPriv)\t#Not generating the MAC this time\r\n\tfor i in range(f):\r\n\t\tif ec.scale(u,i) == shared:\r\n\t\t\tprint(\"\\tSolved mod %d\"%f)\r\n\t\t\trems.append(i)\r\n\t\t\tbreak\r\n\r\n#Now aPriv is +-rems[i] mod factors[i]\r\n#Do them 2 at a time to get down to 2 values mod Prod factors[i]\r\nprint(\"Correcting parities...\")\r\nfor i in range(len(factors)):\r\n\tif rems[i] != 0:\r\n\t\tbreak\r\nfixed = i\r\nfor i in range(len(factors)):\r\n\tif i == fixed:\r\n\t\tcontinue\r\n\tu = 0\r\n\twhile u == 0:\r\n\t\twhile isQRes((u**3+ec.A*u**2+u)%ec.prime,ec.prime):\r\n\t\t\tu = randint(1,ec.prime-1)\r\n\t\tu = ec.scale(u,pp(pp(twist_ord,factors[fixed]),factors[i]))\r\n\t\tif ec.scale(u,factors[fixed]) == 0:\r\n\t\t\tu = 0\r\n\t\telif ec.scale(u,factors[i]) == 0:\r\n\t\t\tu = 0\r\n\tshared = ec.scale(u,aPriv)\r\n\tr,_ = crt([rems[fixed],rems[i]],[factors[fixed],factors[i]])\r\n\tif ec.scale(u,r) != shared:\r\n\t\trems[i] = (-rems[i])%factors[i]\r\n\t\t\r\n#Now I need to run down the remaining bits\r\n",
"from matasano import *\nec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=\n 233970423115425145498902418297807005944)\nassert ec.scale(4, ec.order) == 0\naPriv = randint(1, ec.order - 1)\naPub = ec.scale(4, aPriv)\nprint('Factoring...')\ntwist_ord = 2 * ec.prime + 2 - ec.order\nfactors = []\nx = twist_ord\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\nrems = []\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\nfixed = i\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"<import token>\nec = EC_M(233970423115425145524320034830162017933, 534, 1, 4, order=\n 233970423115425145498902418297807005944)\nassert ec.scale(4, ec.order) == 0\naPriv = randint(1, ec.order - 1)\naPub = ec.scale(4, aPriv)\nprint('Factoring...')\ntwist_ord = 2 * ec.prime + 2 - ec.order\nfactors = []\nx = twist_ord\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\nrems = []\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\nfixed = i\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"<import token>\n<assignment token>\nassert ec.scale(4, ec.order) == 0\n<assignment token>\nprint('Factoring...')\n<assignment token>\nfor i in range(2, 2 ** 24):\n if x % i == 0:\n if x % (i * i) != 0:\n factors.append(i)\n x = pp(x, i)\nprint('Getting remainders...')\n<assignment token>\nfor f in factors:\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(twist_ord, f))\n while ec.scale(u, f) != 0:\n u = ec.scale(u, f)\n shared = ec.scale(u, aPriv)\n for i in range(f):\n if ec.scale(u, i) == shared:\n print('\\tSolved mod %d' % f)\n rems.append(i)\n break\nprint('Correcting parities...')\nfor i in range(len(factors)):\n if rems[i] != 0:\n break\n<assignment token>\nfor i in range(len(factors)):\n if i == fixed:\n continue\n u = 0\n while u == 0:\n while isQRes((u ** 3 + ec.A * u ** 2 + u) % ec.prime, ec.prime):\n u = randint(1, ec.prime - 1)\n u = ec.scale(u, pp(pp(twist_ord, factors[fixed]), factors[i]))\n if ec.scale(u, factors[fixed]) == 0:\n u = 0\n elif ec.scale(u, factors[i]) == 0:\n u = 0\n shared = ec.scale(u, aPriv)\n r, _ = crt([rems[fixed], rems[i]], [factors[fixed], factors[i]])\n if ec.scale(u, r) != shared:\n rems[i] = -rems[i] % factors[i]\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
586 |
aa4fd27382119e3b10d2b57c9b87deff32b5c1ab
|
from final import getMood
import pickle
def get_mood(username_t,username_i):
mapping={'sadness':'0,0,255','angry':'255,0,0','happy':'0,255,0','surprise':'139,69,19','neutral':'189,183,107','fear':'255,165,0'}
#Sad: Blue, Angry: Red, Happy: Green, Surprise: Brown, Neutral:Yellow,Fear:Orange
mood=getMood(username_i,username_t)
value=mood[0][0]
    print(value)
with open('colorfile', 'wb') as fp:
pickle.dump(mapping[value], fp)
return mood
|
[
"from final import getMood\nimport pickle\ndef get_mood(username_t,username_i):\n mapping={'sadness':'0,0,255','angry':'255,0,0','happy':'0,255,0','surprise':'139,69,19','neutral':'189,183,107','fear':'255,165,0'}\n #Sad: Blue, Angry: Red, Happy: Green, Surprise: Brown, Neutral:Yellow,Fear:Orange\n \n mood=getMood(username_i,username_t)\n value=mood[0][0]\n print value\n with open('colorfile', 'wb') as fp:\n pickle.dump(mapping[value], fp)\n return mood\n \n\n \n\n"
] | true |
587 |
dee1ab3adb7f627680410c774be44ae196f63f6c
|
#!/usr/bin/env python3
import base64
from apiclient import errors
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import mimetypes
def Get_Attachments(service, userId, msg_id, store_dir):
"""Get and store attachment from Message with given id.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
msg_id: ID of Message containing attachment.
store_dir: The directory used to store attachments.
"""
try:
message = service.users().messages().get(userId=userId, id=msg_id).execute()
parts = [message['payload']]
while parts:
part = parts.pop()
if part.get('parts'):
parts.extend(part['parts'])
if part.get('filename'):
if 'data' in part['body']:
file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))
#self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))
elif 'attachmentId' in part['body']:
attachment = service.users().messages().attachments().get(
userId=userId, messageId=message['id'], id=part['body']['attachmentId']
).execute()
file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))
#self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))
else:
file_data = None
if file_data:
                    # do something with the attachment data, e.g. save it to disk:
path = ''.join([store_dir, part['filename']])
with open(path, 'wb') as f:
f.write(file_data)
except errors.HttpError as error:
print('An error occurred: %s' % error)
def Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):
"""Reply to message with the new pdf attached.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me".
can be used to indicate the authenticated user.
receiver: Email address of who to send to.
subject: Email subject.
message: Email message, plain text
attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs
threadId: Used to match reply with message thread
message_id: Identifies specific message to interact with.
"""
# Create email message
emailMsg = message
mimeMessage = MIMEMultipart()
mimeMessage['to'] = receiver
mimeMessage['subject'] = subject
mimeMessage['threadId'] = threadId
mimeMessage['In-Reply-To'] = message_id
mimeMessage['References'] = message_id
mimeMessage.attach(MIMEText(emailMsg, 'plain'))
# Attach files
if attachments != None:
attachment = attachments
content_type = mimetypes.guess_type(attachment)
main_type, sub_type = content_type[0].split('/', 1)
file_name = os.path.basename(attachment)
f = open(attachment, 'rb')
myFile = MIMEBase(main_type, sub_type)
myFile.set_payload(f.read())
myFile.add_header('Content-Disposition', 'attachment', filename=file_name)
encoders.encode_base64(myFile)
f.close()
mimeMessage.attach(myFile)
raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}
raw_string['threadId']=threadId
message = service.users().messages().send(userId=userId, body=raw_string).execute()
def Get_Unread_Messages(service, userId):
"""Retrieves all unread messages with attachments, returns list of message ids.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me".
can be used to indicate the authenticated user.
"""
message_list = []
message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt="json", q='is:unread has:attachment').execute()
if message_ids['resultSizeEstimate'] > 0:
for message in message_ids['messages']:
message_list.append(message['id'])
return message_list
def Get_Message_Info(service, userId, message_id):
"""Retrieves received message info, returns tuple.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me".
can be used to indicate the authenticated user.
message_id: Identifies specific message to interact with.
"""
message_info = service.users().messages().get(userId=userId, id=message_id).execute()
ID = message_info['id']
thread_id = message_info['threadId']
header_info = message_info['payload']['headers']
for header in header_info:
if header['name']=='Message-ID':
message_id=header['value']
if header['name']=='From':
sender=header['value']
if header['name']=='Subject':
subject=header['value']
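    # Note: sender, subject and message_id are only assigned when the matching
    # headers are present; a message missing one of them would raise a NameError below.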
attachment_info = message_info['payload']['parts']
attachment_list = []
for attachment in attachment_info:
if attachment['mimeType'] == 'application/pdf':
attachment_list.append(attachment['filename'])
info = (sender, subject, thread_id, message_id, attachment_list, ID)
return info
def Delete_Message(service, userId, message_id):
"""Permanently delete message.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me".
can be used to indicate the authenticated user.
message_id: Identifies specific message to interact with.
"""
service.users().messages().delete(userId=userId, id=message_id).execute()
|
[
"#!/usr/bin/env python3\n\nimport base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n # Create email message\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n \n # Attach files\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n\n f = open(attachment, 'rb')\n\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=file_name)\n encoders.encode_base64(myFile)\n\n f.close()\n\n mimeMessage.attach(myFile)\n \n raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}\n raw_string['threadId']=threadId\n \n message = service.users().messages().send(userId=userId, body=raw_string).execute()\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt=\"json\", q='is:unread has:attachment').execute()\n \n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n\n return message_list\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id).execute()\n\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name']=='Message-ID':\n message_id=header['value']\n if header['name']=='From':\n sender=header['value']\n if header['name']=='Subject':\n subject=header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n\n info = (sender, subject, thread_id, message_id, attachment_list, ID)\n return info\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()",
"import base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"<import token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"<import token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\n<function token>\n",
"<import token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
588 |
c4b9fdba9e9eeccc52999dab9232302f159c882a
|
# Created by MechAviv
# [Maestra Fiametta] | [9390220]
# Commerci Republic : San Commerci
if sm.hasItem(4310100, 1):
sm.setSpeakerID(9390220)
sm.sendSayOkay("You can't start your voyage until you finish the tutorial quest!")
else:
sm.setSpeakerID(9390220)
sm.sendNext("What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.")
sm.setSpeakerID(9390220)
sm.sendSay("Just remember, you can't trade without gold!")
sm.giveItem(4310100, 10)
sm.setSpeakerID(9390220)
sm.sendPrev("Check to make sure there you have coins in your inventory.")
|
[
"# Created by MechAviv\n# [Maestra Fiametta] | [9390220]\n# Commerci Republic : San Commerci\nif sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\"What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.\")\n\n\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n\n\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev(\"Check to make sure there you have coins in your inventory.\")",
"if sm.hasItem(4310100, 1):\n sm.setSpeakerID(9390220)\n sm.sendSayOkay(\n \"You can't start your voyage until you finish the tutorial quest!\")\nelse:\n sm.setSpeakerID(9390220)\n sm.sendNext(\n 'What? You threw away the coins without finishing the tutorial? (Sighs) I suppose I can give you some more coins so that you can complete the tutorial.'\n )\n sm.setSpeakerID(9390220)\n sm.sendSay(\"Just remember, you can't trade without gold!\")\n sm.giveItem(4310100, 10)\n sm.setSpeakerID(9390220)\n sm.sendPrev('Check to make sure there you have coins in your inventory.')\n",
"<code token>\n"
] | false |
589 |
5a103a4f72b9cd3ea3911aeefeeb2194c8ad7df0
|
#coding: utf-8
import mmh3
from bitarray import bitarray
BIT_SIZE = 1 << 30
class BloomFilter:
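    # A Bloom filter answers membership queries probabilistically: is_contains
    # may return false positives, but never false negatives for values added via add().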
def __init__(self):
# Initialize bloom filter, set size and all bits to 0
bit_array = bitarray(BIT_SIZE)
bit_array.setall(0)
self.bit_array = bit_array
def add(self, val):
point_list = self.get_postions(val)
for b in point_list:
self.bit_array[b] = 1
def get_postions(self, val):
        # Get point positions in the bit vector.
        # Different hash seeds give multiple independent hash functions; seeds are best chosen prime.
point1 = mmh3.hash(val, 5) % BIT_SIZE
point2 = mmh3.hash(val, 7) % BIT_SIZE
point3 = mmh3.hash(val, 11) % BIT_SIZE
point4 = mmh3.hash(val, 13) % BIT_SIZE
point7 = mmh3.hash(val, 19) % BIT_SIZE
point5 = mmh3.hash(val, 23) % BIT_SIZE
point6 = mmh3.hash(val, 31) % BIT_SIZE
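        # NOTE: point7 above is computed but never used; only the six positions
        # returned below are set and checked for each value.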
return [point1, point2, point3, point4, point5, point6]
def is_contains(self, val):
point_list = self.get_postions(val)
result = True
for b in point_list:
result = result and self.bit_array[b]
return result
if __name__ == '__main__':
bf = BloomFilter()
    # On the first run this prints "not exists"
if bf.is_contains('zqw'):
print('exists')
else:
print('not exists')
bf.add('zqw')
if bf.is_contains('shooter'):
print('exists')
else:
bf.add('shooter')
if bf.is_contains('zqw'):
print('exists')
else:
bf.add('zqw')
|
[
"#coding: utf-8\nimport mmh3\nfrom bitarray import bitarray\n\nBIT_SIZE = 1 << 30\n\nclass BloomFilter:\n\n def __init__(self):\n # Initialize bloom filter, set size and all bits to 0\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n # Get points positions in bit vector.\n # 提供不同的hash种子得到多个hash函数, seed最好为质数\n\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n\n return result\n\n\nif __name__ == '__main__':\n\n bf = BloomFilter()\n\n # 第一次运行时会显示 not exists\n\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')",
"import mmh3\nfrom bitarray import bitarray\nBIT_SIZE = 1 << 30\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\nif __name__ == '__main__':\n bf = BloomFilter()\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')\n",
"<import token>\nBIT_SIZE = 1 << 30\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\nif __name__ == '__main__':\n bf = BloomFilter()\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\nif __name__ == '__main__':\n bf = BloomFilter()\n if bf.is_contains('zqw'):\n print('exists')\n else:\n print('not exists')\n bf.add('zqw')\n if bf.is_contains('shooter'):\n print('exists')\n else:\n bf.add('shooter')\n if bf.is_contains('zqw'):\n print('exists')\n else:\n bf.add('zqw')\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n\n def is_contains(self, val):\n point_list = self.get_postions(val)\n result = True\n for b in point_list:\n result = result and self.bit_array[b]\n return result\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n\n def __init__(self):\n bit_array = bitarray(BIT_SIZE)\n bit_array.setall(0)\n self.bit_array = bit_array\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n <function token>\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n\n def get_postions(self, val):\n point1 = mmh3.hash(val, 5) % BIT_SIZE\n point2 = mmh3.hash(val, 7) % BIT_SIZE\n point3 = mmh3.hash(val, 11) % BIT_SIZE\n point4 = mmh3.hash(val, 13) % BIT_SIZE\n point7 = mmh3.hash(val, 19) % BIT_SIZE\n point5 = mmh3.hash(val, 23) % BIT_SIZE\n point6 = mmh3.hash(val, 31) % BIT_SIZE\n return [point1, point2, point3, point4, point5, point6]\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n <function token>\n\n def add(self, val):\n point_list = self.get_postions(val)\n for b in point_list:\n self.bit_array[b] = 1\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass BloomFilter:\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
590 |
59d04ebd9a45c6a179a2da1f88f728ba2af91c05
|
from django.contrib import admin
from pharma_models.personas.models import Persona
admin.site.register(Persona)
|
[
"from django.contrib import admin\n\nfrom pharma_models.personas.models import Persona \n\n\nadmin.site.register(Persona)",
"from django.contrib import admin\nfrom pharma_models.personas.models import Persona\nadmin.site.register(Persona)\n",
"<import token>\nadmin.site.register(Persona)\n",
"<import token>\n<code token>\n"
] | false |
591 |
ed5653455062cb3468c232cf0fa3f1d18793626a
|
from python_logging.Demo_CustomLogger import CustomLogger
CustomLogger.init_log()
# CustomLogger.info()
log_str = '%s/%s/%s\n' % ("demo1", "demo2", "demo3")
CustomLogger.info('[main]', log_str)
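# --- Supplementary note (not part of the original file) ---
# python_logging.Demo_CustomLogger is not included in this dump. A minimal, purely
# hypothetical sketch of a logger exposing the init_log() / info(tag, msg) interface
# used above, built on the standard logging module, could look like this:
import logging

class CustomLoggerSketch(object):
    _logger = None

    @classmethod
    def init_log(cls):
        # Configure logging once and cache a named logger for later calls.
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s %(levelname)s %(message)s')
        cls._logger = logging.getLogger('demo')

    @classmethod
    def info(cls, tag, msg):
        cls._logger.info('%s %s', tag, msg)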
|
[
"from python_logging.Demo_CustomLogger import CustomLogger\n\nCustomLogger.init_log()\n# CustomLogger.info()\nlog_str = '%s/%s/%s\\n' % (\"demo1\", \"demo2\", \"demo3\")\nCustomLogger.info('[main]', log_str)\n\n",
"from python_logging.Demo_CustomLogger import CustomLogger\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"<import token>\nCustomLogger.init_log()\nlog_str = '%s/%s/%s\\n' % ('demo1', 'demo2', 'demo3')\nCustomLogger.info('[main]', log_str)\n",
"<import token>\nCustomLogger.init_log()\n<assignment token>\nCustomLogger.info('[main]', log_str)\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
592 |
a9f3d5f11a9f2781571029b54d54b41d9f1f83b3
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
from app.models import *
# Register your models here.
class ProfileInline(admin.StackedInline):
model = UserProfile
can_delete = False
verbose_name_plural = 'profile'
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Thread)
admin.site.register(Comment)
admin.site.register(Experience)
admin.site.register(ThreadTag)
admin.site.register(ExperienceTag)
admin.site.register(UserProfile)
admin.site.register(ExperiencesLike)
admin.site.register(ExperiencesDislike)
admin.site.register(Like)
admin.site.register(Dislike)
admin.site.register(Toolbox)
admin.site.register(ToolboxUser)
admin.site.register(Question)
admin.site.register(Answer)
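# --- Supplementary note (not part of the original file) ---
# The ProfileInline/UserAdmin pair above lets UserProfile rows be edited inline on the
# User change page. A common companion pattern (shown here only as a hedged sketch,
# since the UserProfile model lives in app.models and is not visible in this dump) is
# to create the profile automatically whenever a new User is saved:
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
    # Assumes UserProfile has a one-to-one field named `user`; adjust to the real model.
    if created:
        UserProfile.objects.get_or_create(user=instance)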
|
[
"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\n\nfrom app.models import *\n\n# Register your models here.\n\nclass ProfileInline(admin.StackedInline):\n\tmodel = UserProfile\n\tcan_delete = False\n\tverbose_name_plural = 'profile'\n\nclass UserAdmin(BaseUserAdmin):\n\tinlines = (ProfileInline, )\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)",
"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.contrib.auth.models import User\nfrom app.models import *\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"<import token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(Thread)\nadmin.site.register(Comment)\nadmin.site.register(Experience)\nadmin.site.register(ThreadTag)\nadmin.site.register(ExperienceTag)\nadmin.site.register(UserProfile)\nadmin.site.register(ExperiencesLike)\nadmin.site.register(ExperiencesDislike)\nadmin.site.register(Like)\nadmin.site.register(Dislike)\nadmin.site.register(Toolbox)\nadmin.site.register(ToolboxUser)\nadmin.site.register(Question)\nadmin.site.register(Answer)\n",
"<import token>\n\n\nclass ProfileInline(admin.StackedInline):\n model = UserProfile\n can_delete = False\n verbose_name_plural = 'profile'\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<code token>\n",
"<import token>\n\n\nclass ProfileInline(admin.StackedInline):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass UserAdmin(BaseUserAdmin):\n inlines = ProfileInline,\n\n\n<code token>\n",
"<import token>\n<class token>\n\n\nclass UserAdmin(BaseUserAdmin):\n <assignment token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<code token>\n"
] | false |
593 |
a9b2a4d4924dcdd6e146ea346e71bf42c0259846
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Greger Update Agent (GUA) module for the Greger Client Module
"""
__author__ = "Eric Sandbling"
__license__ = 'MIT'
__status__ = 'Development'
# System modules
import os, sys
import shutil
import logging
import subprocess
from threading import Event
from threading import Thread
from threading import enumerate
# Local Modules
from common import getLocalConfig
from common import restart_program
from gdb import GregerDatabase
# from gcm import GregerClientModule
class GregerUpdateAgent(Thread):
"""
Main class which holds the main sequence of the application.
"""
def __init__(self, ready=None):
'''
Initialize the main class
'''
Thread.__init__(self)
self.ready = ready
# Setup logging
self.logPath = "root.GUA"
self.log = logging.getLogger(self.logPath)
localLog = logging.getLogger(self.logPath + ".__init__")
localLog.debug("Initiating Greger Update Agent (GUA)...")
# Stop execution handler
self.stopExecution = Event()
# Get local path
self._location = os.path.abspath(__file__)
        self._location = self._location[:-15] # Trim the 15-character suffix gcm/__main__.py from the path to get the application's location
localLog.debug("Local path: " + self._location)
# Get Local Configuration Parameters
localLog.debug("Getting configuration parameters from file...")
config = getLocalConfig()
# Locally relevant parameters
self.localRevisionRecordPath = config.get("greger_update_agent", "local_revision_path")
localLog.debug("Parameter: (localRevisionRecordPath) " + self.localRevisionRecordPath)
self.log.info("Greger Update Agent (GUA) successfully initiated!")
@property
def localRevisionRecord(self):
'''
Get local revision record (.gcm)
'''
# Logging
localLog = logging.getLogger(self.logPath + ".localRevisionRecord")
localLog.debug("Getting local revision record...")
# Local parameters
# revisionRecordPath = os.path.join(self._location, ".gcm")
revisionRecordPath = self.localRevisionRecordPath
        localLog.debug("Attempting to get record from file...")
try:
with open(revisionRecordPath,"r") as f:
localRecord = f.read()
localLog.debug("Local revision record: " + str(localRecord))
except Exception as e:
self.log.warning("Failed to open file! - " + str(e))
self.localRevisionRecord = 0
localRecord = self.localRevisionRecord
return localRecord
@localRevisionRecord.setter
def localRevisionRecord(self, newRevision):
'''
Set local revision record (.gcm)
'''
# Logging
localLog = logging.getLogger(self.logPath + ".localRevisionRecord")
localLog.debug("Setting local revision record (.gcm) to " + str(newRevision) + "...")
# Local parameters
# revisionRecordPath = os.path.join(self._location, ".gcm")
revisionRecordPath = self.localRevisionRecordPath
        localLog.debug("Attempting to write \"" + str(newRevision) + "\" to file...")
with open(revisionRecordPath,"w") as f:
f.write(str(newRevision))
self.log.info("Local revision record set: " + str(newRevision))
def getSoftwareInfo(self, rev='HEAD'):
'''
Retrieve information about a revision available on server.
'''
# Logging
localLog = logging.getLogger(self.logPath + ".getSoftwareInfo")
localLog.debug("Attempting to retrieve software revision info...")
# Locally relevant parameters
if 'guaSWSource' in GregerDatabase.settings:
guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']
else:
            self.log.warning("Setting 'guaSWSource' not defined!")
return
moduleReturn = {
'revision': "",
'revision_SHA' : "",
'revision_author' : "",
'revision_date' : "",
'revision_comment' : ""
}
# Get server revision info
localLog.debug("Attempting to retrieve info from server... " + guaSWServerURI)
pCmd = "svn proplist -v -R --revprop -r " + rev
pCmd += " " + guaSWServerURI
localLog.debug(pCmd)
try:
p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
# Create list of output and remove extra white spaces
outputList = output.splitlines()[1:]
outputList = [elem.strip() for elem in outputList]
# Get revision from output
revStr = output.splitlines()[0]
revStr = revStr.split()[-1]
moduleReturn['revision'] = revStr[:-1]
localLog.debug("Revision: " + revStr[:-1])
# Get SHA
shaStr = outputList[outputList.index('git-commit')+1]
moduleReturn['revision_SHA'] = shaStr
localLog.debug("Revision SHA: " + shaStr)
# Get revision author
authorStr = outputList[outputList.index('svn:author')+1]
moduleReturn['revision_author'] = authorStr
localLog.debug("Revision author: " + authorStr)
# Get revision date
dateStr = outputList[outputList.index('svn:date')+1]
moduleReturn['revision_date'] = dateStr
localLog.debug("Revision date: " + dateStr)
# Get revision comment
commentStr = outputList[outputList.index('svn:log')+1].strip()
moduleReturn['revision_comment'] = commentStr
localLog.debug("Revision Comment: " + commentStr)
if err is not None:
localLog.debug("Error message: " + str(err))
except Exception as e:
self.log.error("Oops! Something went wrong - " + str(e))
return moduleReturn
def updateSoftware(self, swRev='HEAD'):
'''
        Get and update the software from the server
'''
# Logging
localLog = logging.getLogger(self.logPath + ".updateSoftware")
localLog.debug("Getting software revision " + str(swRev) + " from server and updating local client...")
# Locally relevant parameters
localLog.debug("Constructing target path for new software...")
targetRoot = self._location
targetDir = "gcm"
targetPath = os.path.join(targetRoot, targetDir)
localLog.debug("Target path: " + targetPath)
localLog.debug("Retrieving relevant parameters from server...")
if 'guaSWSource' in GregerDatabase.settings:
guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']
localLog.debug("Parameter: (guaSWSource) " + guaSWServerURI)
else:
            self.log.warning("Setting 'guaSWSource' not defined!")
return
# Get software files from server
localLog.debug("Getting software files from server...")
# Compile download command
pCmd = "svn export --force -r " + str(swRev)
pCmd += " " + guaSWServerURI
pCmd += " " + targetPath
localLog.debug(pCmd)
# Execute command
try:
p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
if err is not None:
self.log.warning("Error message: " + str(err))
else:
self.log.info("Download successful!")
# Print output
for line in output.splitlines():
self.log.info(line)
except Exception as e:
self.log.error("Oops! Something went wrong - " + str(e))
# Read revision text
localLog.debug("Reading downloaded revision from \"" + output.splitlines()[-1] + "\"...")
revText = output.splitlines()[-1].split()[-1][:-1]
localLog.debug("Downloaded Revision: " + revText)
# Update local revision record
self.localRevisionRecord = revText
# Get downloaded files text
localLog.debug("Listing downloaded files...")
downloadedFiles = []
for row in output.splitlines()[:-1]:
file = os.path.join(targetRoot, [t.strip() for t in row.split()][1])
downloadedFiles.append(file)
localLog.debug("File: " + file)
# List files in directory
self.log.debug("Getting all files in local directory (after update)...")
allFiles = []
# r=root, d=directories, f = files
for r, d, f in os.walk(targetPath):
for file in f:
# allFiles.append(os.path.abspath(file))
allFiles.append(os.path.join(r, file))
localLog.debug("File: " + allFiles[-1])
# localLog.debug("File: " + os.path.join(r, file))
for dir in d:
# allFiles.append(os.path.abspath(dir))
allFiles.append(os.path.join(r, dir))
localLog.debug("Dir: " + allFiles[-1])
# localLog.debug("Dir: " + os.path.join(r, dir))
self.log.info("Identifying old files to remove (<new_files> - <all_files>)...")
diffFiles = list(set(allFiles) - set(downloadedFiles))
for file in diffFiles:
self.log.info("Removing: " + file)
try:
if os.path.isfile(file):
os.unlink(file)
elif os.path.isdir(file):
shutil.rmtree(file)
except Exception as e:
self.log.warning("Oops! Something went wrong! - " + str(e))
# List files in directory
self.log.debug("Re-getting all files in local directory...")
allFiles = []
# r=root, d=directories, f = files
for r, d, f in os.walk(targetPath):
for file in f:
allFiles.append(os.path.join(r, file))
self.log.debug("File: " + os.path.join(r, file))
for dir in d:
allFiles.append(os.path.join(r, dir))
                self.log.debug("Dir: " + os.path.join(r, dir))
def run(self):
'''
Run Greger Update Agent.
'''
# Logging
localLog = logging.getLogger(self.logPath + ".run")
self.log.info("Starting Greger Update Agent (GUA)...")
# Wait for Greger Client Module to start...
localLog.debug("Wait for Greger Client Module to start...")
self.ready.wait()
# Get all active threads!
allThreads = {}
for thr in enumerate():
localLog.debug(thr.name + " " + thr.__class__.__name__ +" active!")
allThreads.update({thr.__class__.__name__ : thr})
if thr.__class__.__name__ == "GregerClientModule":
localLog.debug("Greger Client Module thread found! " +
allThreads['GregerClientModule'].name)
# Start checking for updates
loopCount = 0
while not self.stopExecution.is_set():
loopCount += 1
localLog.debug("Checking for updates (" + str(loopCount) + ")...")
# Get local revision record
localLog.debug("Getting local revision record...")
localRevision = self.localRevisionRecord
# Get server revision...
localLog.debug("Getting latest software info...")
softwareInfo = self.getSoftwareInfo()
self.log.info("Revision check done! (" + str(localRevision) + ")")
if int(localRevision) == int(softwareInfo['revision']):
self.log.info("No new revision found.")
else:
self.log.info("New revision found!")
# Do update!!
localLog.debug("Attempting to update software...")
self.updateSoftware()
# Update server with updated software
localLog.debug("Attempting to update server with software info...")
allThreads['GregerDatabase'].update('about', softwareInfo)
                # Tell GCM to stop all threads (except GUA)...
                self.log.info("Attempting to stop all execution before restarting...")
allThreads['GregerClientModule'].stopAll(GUA=True)
# Restart Application
                self.log.info("Attempting to restart the application...")
restart_program()
if 'guaCheckUpdateDelay' in GregerDatabase.settings:
delayTime = GregerDatabase.settings['guaCheckUpdateDelay']['value']
else:
delayTime = 10
self.log.warning("Settings not defined! (using default=10)")
# Wait update delay
self.log.info("Waiting " + str(delayTime) + "s...")
self.stopExecution.wait(delayTime)
self.log.info("Greger Update Agent (GUA) execution stopped!")
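# --- Supplementary note (not part of the original file) ---
# restart_program() comes from the local `common` module, which is not part of this
# dump. A minimal sketch of such a helper (an assumption about its behaviour, not the
# actual implementation) simply re-execs the current interpreter with the same arguments:
import os
import sys

def _restart_program_sketch():
    """Hypothetical stand-in for common.restart_program (assumption, not the real code)."""
    # Replace the running process image with a fresh Python process running the same script.
    os.execv(sys.executable, [sys.executable] + sys.argv)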
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nGreger Update Agent (GUA) module for the Greger Client Module\n\"\"\"\n\n__author__ = \"Eric Sandbling\"\n__license__ = 'MIT'\n__status__ = 'Development'\n\n# System modules\nimport os, sys\nimport shutil\nimport logging\nimport subprocess\nfrom threading import Event\nfrom threading import Thread\nfrom threading import enumerate\n\n# Local Modules\nfrom common import getLocalConfig\nfrom common import restart_program\nfrom gdb import GregerDatabase\n# from gcm import GregerClientModule\n\nclass GregerUpdateAgent(Thread):\n \"\"\"\n Main class which holds the main sequence of the application.\n \"\"\"\n\n def __init__(self, ready=None):\n '''\n Initialize the main class\n '''\n Thread.__init__(self)\n self.ready = ready\n\n # Setup logging\n self.logPath = \"root.GUA\"\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + \".__init__\")\n localLog.debug(\"Initiating Greger Update Agent (GUA)...\")\n\n # Stop execution handler\n self.stopExecution = Event()\n\n # Get local path\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15] # Trim gcm/__main__.py from path to get at location of application\n localLog.debug(\"Local path: \" + self._location)\n\n # Get Local Configuration Parameters\n localLog.debug(\"Getting configuration parameters from file...\")\n config = getLocalConfig()\n\n # Locally relevant parameters\n self.localRevisionRecordPath = config.get(\"greger_update_agent\", \"local_revision_path\")\n localLog.debug(\"Parameter: (localRevisionRecordPath) \" + self.localRevisionRecordPath)\n\n\n self.log.info(\"Greger Update Agent (GUA) successfully initiated!\")\n\n @property\n def localRevisionRecord(self):\n '''\n Get local revision record (.gcm)\n '''\n # Logging\n localLog = logging.getLogger(self.logPath + \".localRevisionRecord\")\n localLog.debug(\"Getting local revision record...\")\n\n # Local parameters\n # revisionRecordPath = os.path.join(self._location, \".gcm\")\n revisionRecordPath = self.localRevisionRecordPath\n\n localLog.debug(\"Attemption to get record from file...\")\n try:\n with open(revisionRecordPath,\"r\") as f:\n localRecord = f.read()\n localLog.debug(\"Local revision record: \" + str(localRecord))\n except Exception as e:\n self.log.warning(\"Failed to open file! 
- \" + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n '''\n Set local revision record (.gcm)\n '''\n # Logging\n localLog = logging.getLogger(self.logPath + \".localRevisionRecord\")\n localLog.debug(\"Setting local revision record (.gcm) to \" + str(newRevision) + \"...\")\n\n # Local parameters\n # revisionRecordPath = os.path.join(self._location, \".gcm\")\n revisionRecordPath = self.localRevisionRecordPath\n\n localLog.debug(\"Attemption to write \\\"\" + str(newRevision) + \"\\\" to file...\")\n with open(revisionRecordPath,\"w\") as f:\n f.write(str(newRevision))\n self.log.info(\"Local revision record set: \" + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n '''\n Retrieve information about a revision available on server.\n '''\n # Logging\n localLog = logging.getLogger(self.logPath + \".getSoftwareInfo\")\n localLog.debug(\"Attempting to retrieve software revision info...\")\n\n # Locally relevant parameters\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning(\"Setting \" + str(guaSWSource) + \" not defined!\")\n return\n moduleReturn = {\n 'revision': \"\",\n 'revision_SHA' : \"\",\n 'revision_author' : \"\",\n 'revision_date' : \"\",\n 'revision_comment' : \"\"\n }\n\n # Get server revision info\n localLog.debug(\"Attempting to retrieve info from server... \" + guaSWServerURI)\n pCmd = \"svn proplist -v -R --revprop -r \" + rev\n pCmd += \" \" + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n\n # Create list of output and remove extra white spaces\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n\n # Get revision from output\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug(\"Revision: \" + revStr[:-1])\n\n # Get SHA\n shaStr = outputList[outputList.index('git-commit')+1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug(\"Revision SHA: \" + shaStr)\n\n # Get revision author\n authorStr = outputList[outputList.index('svn:author')+1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug(\"Revision author: \" + authorStr)\n\n # Get revision date\n dateStr = outputList[outputList.index('svn:date')+1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug(\"Revision date: \" + dateStr)\n\n # Get revision comment\n commentStr = outputList[outputList.index('svn:log')+1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug(\"Revision Comment: \" + commentStr)\n\n if err is not None:\n localLog.debug(\"Error message: \" + str(err))\n\n except Exception as e:\n self.log.error(\"Oops! 
Something went wrong - \" + str(e))\n\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n '''\n Get and updating software from server\n '''\n # Logging\n localLog = logging.getLogger(self.logPath + \".updateSoftware\")\n localLog.debug(\"Getting software revision \" + str(swRev) + \" from server and updating local client...\")\n\n # Locally relevant parameters\n localLog.debug(\"Constructing target path for new software...\")\n targetRoot = self._location\n targetDir = \"gcm\"\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug(\"Target path: \" + targetPath)\n localLog.debug(\"Retrieving relevant parameters from server...\")\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug(\"Parameter: (guaSWSource) \" + guaSWServerURI)\n else:\n self.log.warning(\"Setting \" + str(guaSWSource) + \" not defined!\")\n return\n\n # Get software files from server\n localLog.debug(\"Getting software files from server...\")\n\n # Compile download command\n pCmd = \"svn export --force -r \" + str(swRev)\n pCmd += \" \" + guaSWServerURI\n pCmd += \" \" + targetPath\n localLog.debug(pCmd)\n\n # Execute command\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n (output, err) = p.communicate()\n\n if err is not None:\n self.log.warning(\"Error message: \" + str(err))\n else:\n self.log.info(\"Download successful!\")\n # Print output\n for line in output.splitlines():\n self.log.info(line)\n\n except Exception as e:\n self.log.error(\"Oops! Something went wrong - \" + str(e))\n\n # Read revision text\n localLog.debug(\"Reading downloaded revision from \\\"\" + output.splitlines()[-1] + \"\\\"...\")\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug(\"Downloaded Revision: \" + revText)\n\n # Update local revision record\n self.localRevisionRecord = revText\n\n # Get downloaded files text\n localLog.debug(\"Listing downloaded files...\")\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1])\n downloadedFiles.append(file)\n localLog.debug(\"File: \" + file)\n\n # List files in directory\n self.log.debug(\"Getting all files in local directory (after update)...\")\n allFiles = []\n # r=root, d=directories, f = files\n for r, d, f in os.walk(targetPath):\n for file in f:\n # allFiles.append(os.path.abspath(file))\n allFiles.append(os.path.join(r, file))\n localLog.debug(\"File: \" + allFiles[-1])\n # localLog.debug(\"File: \" + os.path.join(r, file))\n for dir in d:\n # allFiles.append(os.path.abspath(dir))\n allFiles.append(os.path.join(r, dir))\n localLog.debug(\"Dir: \" + allFiles[-1])\n # localLog.debug(\"Dir: \" + os.path.join(r, dir))\n\n self.log.info(\"Identifying old files to remove (<new_files> - <all_files>)...\")\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info(\"Removing: \" + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning(\"Oops! Something went wrong! 
- \" + str(e))\n\n # List files in directory\n self.log.debug(\"Re-getting all files in local directory...\")\n allFiles = []\n # r=root, d=directories, f = files\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug(\"File: \" + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug(\"Dir: \" + os.path.join(r, file))\n\n def run(self):\n '''\n Run Greger Update Agent.\n '''\n # Logging\n localLog = logging.getLogger(self.logPath + \".run\")\n self.log.info(\"Starting Greger Update Agent (GUA)...\")\n\n # Wait for Greger Client Module to start...\n localLog.debug(\"Wait for Greger Client Module to start...\")\n self.ready.wait()\n\n # Get all active threads!\n allThreads = {}\n for thr in enumerate():\n localLog.debug(thr.name + \" \" + thr.__class__.__name__ +\" active!\")\n allThreads.update({thr.__class__.__name__ : thr})\n if thr.__class__.__name__ == \"GregerClientModule\":\n localLog.debug(\"Greger Client Module thread found! \" +\n allThreads['GregerClientModule'].name)\n\n # Start checking for updates\n loopCount = 0\n while not self.stopExecution.is_set():\n loopCount += 1\n localLog.debug(\"Checking for updates (\" + str(loopCount) + \")...\")\n\n # Get local revision record\n localLog.debug(\"Getting local revision record...\")\n localRevision = self.localRevisionRecord\n\n # Get server revision...\n localLog.debug(\"Getting latest software info...\")\n softwareInfo = self.getSoftwareInfo()\n self.log.info(\"Revision check done! (\" + str(localRevision) + \")\")\n\n if int(localRevision) == int(softwareInfo['revision']):\n self.log.info(\"No new revision found.\")\n else:\n self.log.info(\"New revision found!\")\n\n # Do update!!\n localLog.debug(\"Attempting to update software...\")\n self.updateSoftware()\n\n # Update server with updated software\n localLog.debug(\"Attempting to update server with software info...\")\n allThreads['GregerDatabase'].update('about', softwareInfo)\n\n # Tell GCM to stop all treads (except GUA)...\n self.log.info(\"Attempting to stop all exection before restarting...\")\n allThreads['GregerClientModule'].stopAll(GUA=True)\n\n # Restart Application\n self.log.info(\"Attemption to restart application...\")\n restart_program()\n\n if 'guaCheckUpdateDelay' in GregerDatabase.settings:\n delayTime = GregerDatabase.settings['guaCheckUpdateDelay']['value']\n else:\n delayTime = 10\n self.log.warning(\"Settings not defined! (using default=10)\")\n\n # Wait update delay\n self.log.info(\"Waiting \" + str(delayTime) + \"s...\")\n self.stopExecution.wait(delayTime)\n\n self.log.info(\"Greger Update Agent (GUA) execution stopped!\")\n",
"<docstring token>\n__author__ = 'Eric Sandbling'\n__license__ = 'MIT'\n__status__ = 'Development'\nimport os, sys\nimport shutil\nimport logging\nimport subprocess\nfrom threading import Event\nfrom threading import Thread\nfrom threading import enumerate\nfrom common import getLocalConfig\nfrom common import restart_program\nfrom gdb import GregerDatabase\n\n\nclass GregerUpdateAgent(Thread):\n \"\"\"\n Main class which holds the main sequence of the application.\n \"\"\"\n\n def __init__(self, ready=None):\n \"\"\"\n Initialize the main class\n \"\"\"\n Thread.__init__(self)\n self.ready = ready\n self.logPath = 'root.GUA'\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + '.__init__')\n localLog.debug('Initiating Greger Update Agent (GUA)...')\n self.stopExecution = Event()\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15]\n localLog.debug('Local path: ' + self._location)\n localLog.debug('Getting configuration parameters from file...')\n config = getLocalConfig()\n self.localRevisionRecordPath = config.get('greger_update_agent',\n 'local_revision_path')\n localLog.debug('Parameter: (localRevisionRecordPath) ' + self.\n localRevisionRecordPath)\n self.log.info('Greger Update Agent (GUA) successfully initiated!')\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... 
' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n\n def run(self):\n \"\"\"\n Run Greger Update Agent.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.run')\n self.log.info('Starting Greger Update Agent (GUA)...')\n localLog.debug('Wait for Greger Client Module to start...')\n self.ready.wait()\n allThreads = {}\n for thr in enumerate():\n localLog.debug(thr.name + ' ' + thr.__class__.__name__ + ' active!'\n )\n allThreads.update({thr.__class__.__name__: thr})\n if thr.__class__.__name__ == 'GregerClientModule':\n localLog.debug('Greger Client Module thread found! ' +\n allThreads['GregerClientModule'].name)\n loopCount = 0\n while not self.stopExecution.is_set():\n loopCount += 1\n localLog.debug('Checking for updates (' + str(loopCount) + ')...')\n localLog.debug('Getting local revision record...')\n localRevision = self.localRevisionRecord\n localLog.debug('Getting latest software info...')\n softwareInfo = self.getSoftwareInfo()\n self.log.info('Revision check done! (' + str(localRevision) + ')')\n if int(localRevision) == int(softwareInfo['revision']):\n self.log.info('No new revision found.')\n else:\n self.log.info('New revision found!')\n localLog.debug('Attempting to update software...')\n self.updateSoftware()\n localLog.debug(\n 'Attempting to update server with software info...')\n allThreads['GregerDatabase'].update('about', softwareInfo)\n self.log.info(\n 'Attempting to stop all exection before restarting...')\n allThreads['GregerClientModule'].stopAll(GUA=True)\n self.log.info('Attemption to restart application...')\n restart_program()\n if 'guaCheckUpdateDelay' in GregerDatabase.settings:\n delayTime = GregerDatabase.settings['guaCheckUpdateDelay'][\n 'value']\n else:\n delayTime = 10\n self.log.warning('Settings not defined! (using default=10)')\n self.log.info('Waiting ' + str(delayTime) + 's...')\n self.stopExecution.wait(delayTime)\n self.log.info('Greger Update Agent (GUA) execution stopped!')\n",
"<docstring token>\n__author__ = 'Eric Sandbling'\n__license__ = 'MIT'\n__status__ = 'Development'\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n \"\"\"\n Main class which holds the main sequence of the application.\n \"\"\"\n\n def __init__(self, ready=None):\n \"\"\"\n Initialize the main class\n \"\"\"\n Thread.__init__(self)\n self.ready = ready\n self.logPath = 'root.GUA'\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + '.__init__')\n localLog.debug('Initiating Greger Update Agent (GUA)...')\n self.stopExecution = Event()\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15]\n localLog.debug('Local path: ' + self._location)\n localLog.debug('Getting configuration parameters from file...')\n config = getLocalConfig()\n self.localRevisionRecordPath = config.get('greger_update_agent',\n 'local_revision_path')\n localLog.debug('Parameter: (localRevisionRecordPath) ' + self.\n localRevisionRecordPath)\n self.log.info('Greger Update Agent (GUA) successfully initiated!')\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... 
' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n\n def run(self):\n \"\"\"\n Run Greger Update Agent.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.run')\n self.log.info('Starting Greger Update Agent (GUA)...')\n localLog.debug('Wait for Greger Client Module to start...')\n self.ready.wait()\n allThreads = {}\n for thr in enumerate():\n localLog.debug(thr.name + ' ' + thr.__class__.__name__ + ' active!'\n )\n allThreads.update({thr.__class__.__name__: thr})\n if thr.__class__.__name__ == 'GregerClientModule':\n localLog.debug('Greger Client Module thread found! ' +\n allThreads['GregerClientModule'].name)\n loopCount = 0\n while not self.stopExecution.is_set():\n loopCount += 1\n localLog.debug('Checking for updates (' + str(loopCount) + ')...')\n localLog.debug('Getting local revision record...')\n localRevision = self.localRevisionRecord\n localLog.debug('Getting latest software info...')\n softwareInfo = self.getSoftwareInfo()\n self.log.info('Revision check done! (' + str(localRevision) + ')')\n if int(localRevision) == int(softwareInfo['revision']):\n self.log.info('No new revision found.')\n else:\n self.log.info('New revision found!')\n localLog.debug('Attempting to update software...')\n self.updateSoftware()\n localLog.debug(\n 'Attempting to update server with software info...')\n allThreads['GregerDatabase'].update('about', softwareInfo)\n self.log.info(\n 'Attempting to stop all exection before restarting...')\n allThreads['GregerClientModule'].stopAll(GUA=True)\n self.log.info('Attemption to restart application...')\n restart_program()\n if 'guaCheckUpdateDelay' in GregerDatabase.settings:\n delayTime = GregerDatabase.settings['guaCheckUpdateDelay'][\n 'value']\n else:\n delayTime = 10\n self.log.warning('Settings not defined! (using default=10)')\n self.log.info('Waiting ' + str(delayTime) + 's...')\n self.stopExecution.wait(delayTime)\n self.log.info('Greger Update Agent (GUA) execution stopped!')\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n \"\"\"\n Main class which holds the main sequence of the application.\n \"\"\"\n\n def __init__(self, ready=None):\n \"\"\"\n Initialize the main class\n \"\"\"\n Thread.__init__(self)\n self.ready = ready\n self.logPath = 'root.GUA'\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + '.__init__')\n localLog.debug('Initiating Greger Update Agent (GUA)...')\n self.stopExecution = Event()\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15]\n localLog.debug('Local path: ' + self._location)\n localLog.debug('Getting configuration parameters from file...')\n config = getLocalConfig()\n self.localRevisionRecordPath = config.get('greger_update_agent',\n 'local_revision_path')\n localLog.debug('Parameter: (localRevisionRecordPath) ' + self.\n localRevisionRecordPath)\n self.log.info('Greger Update Agent (GUA) successfully initiated!')\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... 
' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n\n def run(self):\n \"\"\"\n Run Greger Update Agent.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.run')\n self.log.info('Starting Greger Update Agent (GUA)...')\n localLog.debug('Wait for Greger Client Module to start...')\n self.ready.wait()\n allThreads = {}\n for thr in enumerate():\n localLog.debug(thr.name + ' ' + thr.__class__.__name__ + ' active!'\n )\n allThreads.update({thr.__class__.__name__: thr})\n if thr.__class__.__name__ == 'GregerClientModule':\n localLog.debug('Greger Client Module thread found! ' +\n allThreads['GregerClientModule'].name)\n loopCount = 0\n while not self.stopExecution.is_set():\n loopCount += 1\n localLog.debug('Checking for updates (' + str(loopCount) + ')...')\n localLog.debug('Getting local revision record...')\n localRevision = self.localRevisionRecord\n localLog.debug('Getting latest software info...')\n softwareInfo = self.getSoftwareInfo()\n self.log.info('Revision check done! (' + str(localRevision) + ')')\n if int(localRevision) == int(softwareInfo['revision']):\n self.log.info('No new revision found.')\n else:\n self.log.info('New revision found!')\n localLog.debug('Attempting to update software...')\n self.updateSoftware()\n localLog.debug(\n 'Attempting to update server with software info...')\n allThreads['GregerDatabase'].update('about', softwareInfo)\n self.log.info(\n 'Attempting to stop all exection before restarting...')\n allThreads['GregerClientModule'].stopAll(GUA=True)\n self.log.info('Attemption to restart application...')\n restart_program()\n if 'guaCheckUpdateDelay' in GregerDatabase.settings:\n delayTime = GregerDatabase.settings['guaCheckUpdateDelay'][\n 'value']\n else:\n delayTime = 10\n self.log.warning('Settings not defined! (using default=10)')\n self.log.info('Waiting ' + str(delayTime) + 's...')\n self.stopExecution.wait(delayTime)\n self.log.info('Greger Update Agent (GUA) execution stopped!')\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n\n def __init__(self, ready=None):\n \"\"\"\n Initialize the main class\n \"\"\"\n Thread.__init__(self)\n self.ready = ready\n self.logPath = 'root.GUA'\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + '.__init__')\n localLog.debug('Initiating Greger Update Agent (GUA)...')\n self.stopExecution = Event()\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15]\n localLog.debug('Local path: ' + self._location)\n localLog.debug('Getting configuration parameters from file...')\n config = getLocalConfig()\n self.localRevisionRecordPath = config.get('greger_update_agent',\n 'local_revision_path')\n localLog.debug('Parameter: (localRevisionRecordPath) ' + self.\n localRevisionRecordPath)\n self.log.info('Greger Update Agent (GUA) successfully initiated!')\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... 
' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n\n def run(self):\n \"\"\"\n Run Greger Update Agent.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.run')\n self.log.info('Starting Greger Update Agent (GUA)...')\n localLog.debug('Wait for Greger Client Module to start...')\n self.ready.wait()\n allThreads = {}\n for thr in enumerate():\n localLog.debug(thr.name + ' ' + thr.__class__.__name__ + ' active!'\n )\n allThreads.update({thr.__class__.__name__: thr})\n if thr.__class__.__name__ == 'GregerClientModule':\n localLog.debug('Greger Client Module thread found! ' +\n allThreads['GregerClientModule'].name)\n loopCount = 0\n while not self.stopExecution.is_set():\n loopCount += 1\n localLog.debug('Checking for updates (' + str(loopCount) + ')...')\n localLog.debug('Getting local revision record...')\n localRevision = self.localRevisionRecord\n localLog.debug('Getting latest software info...')\n softwareInfo = self.getSoftwareInfo()\n self.log.info('Revision check done! (' + str(localRevision) + ')')\n if int(localRevision) == int(softwareInfo['revision']):\n self.log.info('No new revision found.')\n else:\n self.log.info('New revision found!')\n localLog.debug('Attempting to update software...')\n self.updateSoftware()\n localLog.debug(\n 'Attempting to update server with software info...')\n allThreads['GregerDatabase'].update('about', softwareInfo)\n self.log.info(\n 'Attempting to stop all exection before restarting...')\n allThreads['GregerClientModule'].stopAll(GUA=True)\n self.log.info('Attemption to restart application...')\n restart_program()\n if 'guaCheckUpdateDelay' in GregerDatabase.settings:\n delayTime = GregerDatabase.settings['guaCheckUpdateDelay'][\n 'value']\n else:\n delayTime = 10\n self.log.warning('Settings not defined! (using default=10)')\n self.log.info('Waiting ' + str(delayTime) + 's...')\n self.stopExecution.wait(delayTime)\n self.log.info('Greger Update Agent (GUA) execution stopped!')\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n\n def __init__(self, ready=None):\n \"\"\"\n Initialize the main class\n \"\"\"\n Thread.__init__(self)\n self.ready = ready\n self.logPath = 'root.GUA'\n self.log = logging.getLogger(self.logPath)\n localLog = logging.getLogger(self.logPath + '.__init__')\n localLog.debug('Initiating Greger Update Agent (GUA)...')\n self.stopExecution = Event()\n self._location = os.path.abspath(__file__)\n self._location = self._location[:-15]\n localLog.debug('Local path: ' + self._location)\n localLog.debug('Getting configuration parameters from file...')\n config = getLocalConfig()\n self.localRevisionRecordPath = config.get('greger_update_agent',\n 'local_revision_path')\n localLog.debug('Parameter: (localRevisionRecordPath) ' + self.\n localRevisionRecordPath)\n self.log.info('Greger Update Agent (GUA) successfully initiated!')\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... 
' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n <function token>\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... ' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! 
Something went wrong - ' + str(e))\n return moduleReturn\n\n def updateSoftware(self, swRev='HEAD'):\n \"\"\"\n Get and updating software from server\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.updateSoftware')\n localLog.debug('Getting software revision ' + str(swRev) +\n ' from server and updating local client...')\n localLog.debug('Constructing target path for new software...')\n targetRoot = self._location\n targetDir = 'gcm'\n targetPath = os.path.join(targetRoot, targetDir)\n localLog.debug('Target path: ' + targetPath)\n localLog.debug('Retrieving relevant parameters from server...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n localLog.debug('Parameter: (guaSWSource) ' + guaSWServerURI)\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n localLog.debug('Getting software files from server...')\n pCmd = 'svn export --force -r ' + str(swRev)\n pCmd += ' ' + guaSWServerURI\n pCmd += ' ' + targetPath\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n if err is not None:\n self.log.warning('Error message: ' + str(err))\n else:\n self.log.info('Download successful!')\n for line in output.splitlines():\n self.log.info(line)\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n localLog.debug('Reading downloaded revision from \"' + output.\n splitlines()[-1] + '\"...')\n revText = output.splitlines()[-1].split()[-1][:-1]\n localLog.debug('Downloaded Revision: ' + revText)\n self.localRevisionRecord = revText\n localLog.debug('Listing downloaded files...')\n downloadedFiles = []\n for row in output.splitlines()[:-1]:\n file = os.path.join(targetRoot, [t.strip() for t in row.split()][1]\n )\n downloadedFiles.append(file)\n localLog.debug('File: ' + file)\n self.log.debug('Getting all files in local directory (after update)...'\n )\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n localLog.debug('File: ' + allFiles[-1])\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n localLog.debug('Dir: ' + allFiles[-1])\n self.log.info(\n 'Identifying old files to remove (<new_files> - <all_files>)...')\n diffFiles = list(set(allFiles) - set(downloadedFiles))\n for file in diffFiles:\n self.log.info('Removing: ' + file)\n try:\n if os.path.isfile(file):\n os.unlink(file)\n elif os.path.isdir(file):\n shutil.rmtree(file)\n except Exception as e:\n self.log.warning('Oops! Something went wrong! - ' + str(e))\n self.log.debug('Re-getting all files in local directory...')\n allFiles = []\n for r, d, f in os.walk(targetPath):\n for file in f:\n allFiles.append(os.path.join(r, file))\n self.log.debug('File: ' + os.path.join(r, file))\n for dir in d:\n allFiles.append(os.path.join(r, dir))\n self.log.debug('Dir: ' + os.path.join(r, file))\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n <function token>\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n\n def getSoftwareInfo(self, rev='HEAD'):\n \"\"\"\n Retrieve information about a revision available on server.\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.getSoftwareInfo')\n localLog.debug('Attempting to retrieve software revision info...')\n if 'guaSWSource' in GregerDatabase.settings:\n guaSWServerURI = GregerDatabase.settings['guaSWSource']['value']\n else:\n self.log.warning('Setting ' + str(guaSWSource) + ' not defined!')\n return\n moduleReturn = {'revision': '', 'revision_SHA': '',\n 'revision_author': '', 'revision_date': '', 'revision_comment': ''}\n localLog.debug('Attempting to retrieve info from server... ' +\n guaSWServerURI)\n pCmd = 'svn proplist -v -R --revprop -r ' + rev\n pCmd += ' ' + guaSWServerURI\n localLog.debug(pCmd)\n try:\n p = subprocess.Popen(pCmd, stdout=subprocess.PIPE, shell=True)\n output, err = p.communicate()\n outputList = output.splitlines()[1:]\n outputList = [elem.strip() for elem in outputList]\n revStr = output.splitlines()[0]\n revStr = revStr.split()[-1]\n moduleReturn['revision'] = revStr[:-1]\n localLog.debug('Revision: ' + revStr[:-1])\n shaStr = outputList[outputList.index('git-commit') + 1]\n moduleReturn['revision_SHA'] = shaStr\n localLog.debug('Revision SHA: ' + shaStr)\n authorStr = outputList[outputList.index('svn:author') + 1]\n moduleReturn['revision_author'] = authorStr\n localLog.debug('Revision author: ' + authorStr)\n dateStr = outputList[outputList.index('svn:date') + 1]\n moduleReturn['revision_date'] = dateStr\n localLog.debug('Revision date: ' + dateStr)\n commentStr = outputList[outputList.index('svn:log') + 1].strip()\n moduleReturn['revision_comment'] = commentStr\n localLog.debug('Revision Comment: ' + commentStr)\n if err is not None:\n localLog.debug('Error message: ' + str(err))\n except Exception as e:\n self.log.error('Oops! Something went wrong - ' + str(e))\n return moduleReturn\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n <function token>\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n\n @localRevisionRecord.setter\n def localRevisionRecord(self, newRevision):\n \"\"\"\n Set local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Setting local revision record (.gcm) to ' + str(\n newRevision) + '...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to write \"' + str(newRevision) +\n '\" to file...')\n with open(revisionRecordPath, 'w') as f:\n f.write(str(newRevision))\n self.log.info('Local revision record set: ' + str(newRevision))\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n <function token>\n\n @property\n def localRevisionRecord(self):\n \"\"\"\n Get local revision record (.gcm)\n \"\"\"\n localLog = logging.getLogger(self.logPath + '.localRevisionRecord')\n localLog.debug('Getting local revision record...')\n revisionRecordPath = self.localRevisionRecordPath\n localLog.debug('Attemption to get record from file...')\n try:\n with open(revisionRecordPath, 'r') as f:\n localRecord = f.read()\n localLog.debug('Local revision record: ' + str(localRecord))\n except Exception as e:\n self.log.warning('Failed to open file! - ' + str(e))\n self.localRevisionRecord = 0\n localRecord = self.localRevisionRecord\n return localRecord\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n\n\nclass GregerUpdateAgent(Thread):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<class token>\n"
] | false |
594 |
9bd659bb3bf812e48710f625bb65a848d3a8d074
|
#THIS BUILD WORKS, BUT IS VERY SLOW. CURRENTLY YIELDS A DECENT SCORE, NOT GREAT
alphabet = "abcdefghijklmnopqrstuvwxyz"
def author():
return ""
def student_id():
return ""
def fill_words(pattern,words,scoring_f,minlen,maxlen):
foundWords = find_words(pattern,words,scoring_f,minlen,maxlen)
foundWords = foundWords + pattern[len(foundWords):]
return foundWords
def find_words(pattern,words,scoring_f,minlen,maxlen):
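    # How this works (added description): wordDict buckets the dictionary by
    # word length. The while-loop slides a window over the pattern one cell at
    # a time (patternCopy is trimmed from the left; beg_point tracks the
    # offset). For each window it tries word lengths from minlen up to maxlen
    # and keeps the best-scoring word that matches the fixed letters and is
    # followed by a blank ('-'). Once a window has been fully explored and the
    # cell just before it is a blank, a candidate state
    # [best word, text left of the window, text right of the placed word]
    # is recorded. Finally the best-scoring state wins and its left/right
    # remainders are filled recursively via fill_words.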
patternCopy = pattern
bestWord=("",0)
bestState=[("",0),[],[]]
toConsider = ""
possibleWords=[]
length = minlen
wordDict = {}
beg_point = 0
states = []
if len(pattern) < minlen:
return pattern
for w in words:
if len(w) in wordDict:
wordDict[len(w)] += [w]
else:
wordDict[len(w)] = [w]
while len(patternCopy) > 1:
if length in wordDict:
for w in wordDict[length]:
snip = patternCopy[:length]
for p in range(len(snip)):
if patternCopy[p] != "-" and patternCopy[p] != w[p]:
toConsider = ""
break
toConsider = w
                try:
                    # The cell right after the candidate must still be blank; an
                    # IndexError here means the word would run to the very end of the pattern.
                    if patternCopy[len(toConsider)] == "-" and toConsider != "" and toConsider not in possibleWords:
                        if scoring_f(toConsider) > bestWord[1]:
                            bestWord = (toConsider, scoring_f(toConsider))
                except IndexError:
                    break
if length == maxlen:
patternCopy = patternCopy[1:]
leftHalf = pattern[:beg_point]
rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]
beg_point += 1
if len(leftHalf) > 0 and leftHalf[-1] == "-":
states.append([bestWord,leftHalf,rightHalf])
bestWord = ("",0)
length = minlen
length+=1
for s in states:
if s[0][1] > bestState[0][1]:
bestState = s
leftState = fill_words(bestState[1],words,scoring_f,minlen,maxlen)
rightState = fill_words(bestState[2],words,scoring_f,minlen,maxlen)
if len(leftState) == 0:
leftState = ""
if len(rightState) == 0:
rightState = ""
return leftState + bestState[0][0] + rightState
minlen, maxlen = 4, 30
letter_values = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,
'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,
'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1,
'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
letter_counts = {c: 0 for c in letter_values}
with open('words_sorted.txt', 'r', encoding='utf-8') as f:
words = [x.strip() for x in f]
words = [x for x in words if minlen <= len(x) <= maxlen]
wordset = set(words)
for word in words:
for c in word:
letter_counts[c] += 1
def scrabble_value(word):
if minlen <= len(word) <= maxlen:
return sum(letter_values.get(c, 0) for c in word)
else:
return 0
def length_squared(word):
if minlen <= len(word) <= maxlen:
return len(word) ** 2
else:
return 0
def scoring_f(w):
return length_squared(w) + scrabble_value(w)
pattern = "-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-"
# print(pattern)
# print(fill_words(pattern,words,scoring_f,minlen,maxlen))
|
[
"#THIS BUILD WORKS, BUT IS VERY SLOW. CURRENTLY YIELDS A DECENT SCORE, NOT GREAT \n\nalphabet = \"abcdefghijklmnopqrstuvwxyz\"\n\ndef author():\n return \"\"\ndef student_id():\n return \"\"\ndef fill_words(pattern,words,scoring_f,minlen,maxlen):\n \n foundWords = find_words(pattern,words,scoring_f,minlen,maxlen)\n \n foundWords = foundWords + pattern[len(foundWords):]\n \n return foundWords\n\ndef find_words(pattern,words,scoring_f,minlen,maxlen):\n patternCopy = pattern\n bestWord=(\"\",0)\n bestState=[(\"\",0),[],[]]\n toConsider = \"\"\n possibleWords=[]\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n \n if len(pattern) < minlen:\n return pattern\n \n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != \"-\" and patternCopy[p] != w[p]:\n toConsider = \"\"\n break\n toConsider = w\n try:\n if patternCopy[len(toConsider)] == \"-\" and toConsider != \"\" and toConsider not in possibleWords:\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = (toConsider, scoring_f(toConsider))\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n \n if len(leftHalf) > 0 and leftHalf[-1] == \"-\":\n states.append([bestWord,leftHalf,rightHalf])\n\n bestWord = (\"\",0)\n length = minlen\n length+=1\n\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n \n leftState = fill_words(bestState[1],words,scoring_f,minlen,maxlen)\n rightState = fill_words(bestState[2],words,scoring_f,minlen,maxlen)\n if len(leftState) == 0:\n leftState = \"\"\n if len(rightState) == 0:\n rightState = \"\"\n return leftState + bestState[0][0] + rightState\n\n\n\nminlen, maxlen = 4, 30\nletter_values = {\n 'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1,\n 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1,\n 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10\n }\n\nletter_counts = {c: 0 for c in letter_values}\n\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n \ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n \ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\npattern = \"-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-\"\n\n# print(pattern)\n# print(fill_words(pattern,words,scoring_f,minlen,maxlen))",
"alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\nminlen, maxlen = 4, 30\nletter_values = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2,\n 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3,\n 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4,\n 'z': 10}\nletter_counts = {c: (0) for c in letter_values}\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\npattern = '-l-h--i-o--w--s--u--g-d-u-n-c-c--b--c-t-'\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\nwith open('words_sorted.txt', 'r', encoding='utf-8') as f:\n words = [x.strip() for x in f]\n words = [x for x in words if minlen <= len(x) <= maxlen]\n wordset = set(words)\n for word in words:\n for c in word:\n letter_counts[c] += 1\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\ndef length_squared(word):\n if minlen <= len(word) <= maxlen:\n return len(word) ** 2\n else:\n return 0\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\ndef fill_words(pattern, words, scoring_f, minlen, maxlen):\n foundWords = find_words(pattern, words, scoring_f, minlen, maxlen)\n foundWords = foundWords + pattern[len(foundWords):]\n return foundWords\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\n<function token>\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\n<function token>\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\n<function token>\n\n\ndef scoring_f(w):\n return length_squared(w) + scrabble_value(w)\n\n\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\ndef student_id():\n return ''\n\n\n<function token>\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\n<function token>\n<function token>\n\n\ndef find_words(pattern, words, scoring_f, minlen, maxlen):\n patternCopy = pattern\n bestWord = '', 0\n bestState = [('', 0), [], []]\n toConsider = ''\n possibleWords = []\n length = minlen\n wordDict = {}\n beg_point = 0\n states = []\n if len(pattern) < minlen:\n return pattern\n for w in words:\n if len(w) in wordDict:\n wordDict[len(w)] += [w]\n else:\n wordDict[len(w)] = [w]\n while len(patternCopy) > 1:\n if length in wordDict:\n for w in wordDict[length]:\n snip = patternCopy[:length]\n for p in range(len(snip)):\n if patternCopy[p] != '-' and patternCopy[p] != w[p]:\n toConsider = ''\n break\n toConsider = w\n try:\n if (patternCopy[len(toConsider)] == '-' and toConsider !=\n '' and toConsider not in possibleWords):\n if scoring_f(toConsider) > bestWord[1]:\n bestWord = toConsider, scoring_f(toConsider)\n except:\n break\n if length == maxlen:\n patternCopy = patternCopy[1:]\n leftHalf = pattern[:beg_point]\n rightHalf = pattern[len(leftHalf) + len(bestWord[0]):]\n beg_point += 1\n if len(leftHalf) > 0 and leftHalf[-1] == '-':\n states.append([bestWord, leftHalf, rightHalf])\n bestWord = '', 0\n length = minlen\n length += 1\n for s in states:\n if s[0][1] > bestState[0][1]:\n bestState = s\n leftState = fill_words(bestState[1], words, scoring_f, minlen, maxlen)\n rightState = fill_words(bestState[2], words, scoring_f, minlen, maxlen)\n if len(leftState) == 0:\n leftState = ''\n if len(rightState) == 0:\n rightState = ''\n return leftState + bestState[0][0] + rightState\n\n\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n\n\ndef scrabble_value(word):\n if minlen <= len(word) <= maxlen:\n return sum(letter_values.get(c, 0) for c in word)\n else:\n return 0\n\n\n<function token>\n<function token>\n<assignment token>\n",
"<assignment token>\n\n\ndef author():\n return ''\n\n\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n",
"<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n"
] | false |
595 |
3212bb7df990ad7d075b8ca49a99e1072eab2a90
|
from typing import Type
from sqlalchemy.exc import IntegrityError
from src.main.interface import RouteInterface as Route
from src.presenters.helpers import HttpRequest, HttpResponse
from src.presenters.errors import HttpErrors
def flask_adapter(request: any, api_route: Type[Route]) -> any:
"""Adapter pattern for Flask
:param - Flask Request
:api_route: Composite Routes
"""
try:
query_string_params = request.args.to_dict()
if "account_id" in query_string_params.keys():
body = None
query_string_params["account_id"] = int(query_string_params["account_id"])
else:
body = request.json
    except:  # any malformed query string or JSON body is answered with HTTP 400
http_error = HttpErrors.error_400()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
http_request = HttpRequest(
header=request.headers, body=body, query=query_string_params
)
try:
response = api_route.route(http_request)
except IntegrityError:
http_error = HttpErrors.error_400()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
except Exception as exc:
print(exc)
http_error = HttpErrors.error_500()
return HttpResponse(
status_code=http_error["status_code"], body=http_error["body"]
)
return response
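

# --- Added usage sketch (not part of the original module) ---
# Shows how flask_adapter is typically wired into a Flask view. The Flask app,
# the "/accounts" URL and the DummyRoute class are hypothetical; only
# flask_adapter, HttpRequest and HttpResponse above come from this project, and
# the .query / .status_code / .body attributes are assumed from the constructor
# keywords used in the adapter itself.
if __name__ == "__main__":
    from flask import Flask, jsonify, request as flask_request

    class DummyRoute:
        """Stand-in for a composite route; the adapter only needs .route()."""

        def route(self, http_request: HttpRequest) -> HttpResponse:
            # Echo the parsed query string back to the caller.
            return HttpResponse(status_code=200, body={"query": http_request.query})

    app = Flask(__name__)

    @app.route("/accounts", methods=["GET"])
    def accounts_view():
        response = flask_adapter(request=flask_request, api_route=DummyRoute())
        return jsonify(response.body), response.status_code

    app.run(debug=True)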
|
[
"from typing import Type\nfrom sqlalchemy.exc import IntegrityError\nfrom src.main.interface import RouteInterface as Route\nfrom src.presenters.helpers import HttpRequest, HttpResponse\nfrom src.presenters.errors import HttpErrors\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) -> any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n\n try:\n query_string_params = request.args.to_dict()\n\n if \"account_id\" in query_string_params.keys():\n body = None\n query_string_params[\"account_id\"] = int(query_string_params[\"account_id\"])\n else:\n body = request.json\n\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n\n http_request = HttpRequest(\n header=request.headers, body=body, query=query_string_params\n )\n\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(\n status_code=http_error[\"status_code\"], body=http_error[\"body\"]\n )\n\n return response\n",
"from typing import Type\nfrom sqlalchemy.exc import IntegrityError\nfrom src.main.interface import RouteInterface as Route\nfrom src.presenters.helpers import HttpRequest, HttpResponse\nfrom src.presenters.errors import HttpErrors\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) ->any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n try:\n query_string_params = request.args.to_dict()\n if 'account_id' in query_string_params.keys():\n body = None\n query_string_params['account_id'] = int(query_string_params[\n 'account_id'])\n else:\n body = request.json\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n http_request = HttpRequest(header=request.headers, body=body, query=\n query_string_params)\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n return response\n",
"<import token>\n\n\ndef flask_adapter(request: any, api_route: Type[Route]) ->any:\n \"\"\"Adapter pattern for Flask\n :param - Flask Request\n :api_route: Composite Routes\n \"\"\"\n try:\n query_string_params = request.args.to_dict()\n if 'account_id' in query_string_params.keys():\n body = None\n query_string_params['account_id'] = int(query_string_params[\n 'account_id'])\n else:\n body = request.json\n except:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n http_request = HttpRequest(header=request.headers, body=body, query=\n query_string_params)\n try:\n response = api_route.route(http_request)\n except IntegrityError:\n http_error = HttpErrors.error_400()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n except Exception as exc:\n print(exc)\n http_error = HttpErrors.error_500()\n return HttpResponse(status_code=http_error['status_code'], body=\n http_error['body'])\n return response\n",
"<import token>\n<function token>\n"
] | false |
596 |
3a65565af4c55fa5479e323a737c48f7f2fdb8ce
|
'''
Python's open() function opens a file and returns a file object; the file is
then read and written through that object's methods.
For more on file handling see: Python File I/O.

Function syntax
    open(name[, mode[, buffering]])

Parameters:
    name      : a string holding the name (path) of the file to access.
    mode      : how the file is opened -- read-only, write, append, etc. All
                possible values are listed below. Optional; the default access
                mode is read-only ('r').
    buffering : 0 means unbuffered, 1 means line-buffered, an integer greater
                than 1 is used as the buffer size, and a negative value selects
                the system default buffer size.

Complete list of open modes:
    r    open for reading (text); the pointer starts at the beginning of the
         file; this is the default mode.
    rb   open for reading in binary; pointer at the beginning of the file.
    r+   open for reading and writing; pointer at the beginning of the file.
    rb+  open for reading and writing in binary; pointer at the beginning.
    w    open for writing only; an existing file is truncated (its content is
         removed), otherwise a new file is created.
    wb   binary writing only; truncates an existing file or creates a new one.
    w+   open for reading and writing; truncates an existing file or creates
         a new one.
    wb+  binary reading and writing; truncates an existing file or creates a
         new one.
    a    append; if the file exists the pointer is placed at the end, so new
         content is written after the existing content; otherwise a new file
         is created for writing.
    ab   binary append; same behaviour as 'a'.
    a+   reading and appending; the pointer is at the end of an existing file,
         which is opened in append mode; creates a new file for reading and
         writing if it does not exist.
    ab+  binary appending; pointer at the end of an existing file; creates a
         new file for reading and writing if it does not exist.

file object methods
    file.read([size])       : returns the whole file when size is omitted
                              (problematic when the file is larger than about
                              twice the available memory); f.read() returns ""
                              (an empty string) at end of file.
    file.readline()         : returns one line.
    file.readlines([size])  : returns a list of lines; all lines when size is
                              omitted.
    for line in f: print line  -- iterate over the file object.
    f.write("hello\n")      : data other than strings must be converted to a
                              string before being written.
    f.tell()                : returns an integer giving the current position of
                              the file pointer (bytes from the start of the file).
    f.seek(offset[, whence]): moves the file pointer. offset is in bytes and may
                              be negative; whence is 0 for the start of the file
                              (default), 1 for the current position, 2 for the
                              end of the file.
    f.close()               : closes the file.

open(filename [, mode [, bufsize]])
    Opens a file and returns a file object; raises IOError if the file cannot
    be opened. open() should be used instead of calling the file type's
    constructor directly.
    filename is the path string of the file to be opened.
    mode is the open mode; the most common values are 'r' (read text),
    'w' (write a text file) and 'a' (append to a file). The default is 'r'.
    When working with binary files just add 'b' to the mode value, which also
    improves portability.
    The optional bufsize argument sets the size of the file buffer: 0 means
    unbuffered, 1 means line-buffered, any other positive number is the buffer
    size, and a negative number selects the system default (usually line
    buffering for tty devices and full buffering for other files). If the
    argument is omitted, the system default is used.
'''
with open('1.txt', 'r', encoding='utf-8') as f:
    print(f.read())
'''
Output:
ltf
zhongguo
shanxi
yuncheng
男
20
'''
# Reference blog: https://www.cnblogs.com/Devilf/p/8006663.html
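
# --- Added sketch (not part of the original example) ---
# A small, self-contained demonstration of the modes and file-object methods
# described in the docstring above. The file name 'demo_modes.txt' is
# illustrative only.
with open('demo_modes.txt', 'w', encoding='utf-8') as out:    # 'w' truncates or creates
    out.write('first line\n')
with open('demo_modes.txt', 'a', encoding='utf-8') as out:    # 'a' appends at the end
    out.write('second line\n')
with open('demo_modes.txt', 'r', encoding='utf-8') as src:    # 'r' is the default mode
    print(src.readline(), end='')    # read a single line -> 'first line'
    print(src.tell())                # byte offset of the file pointer
    src.seek(0)                      # move the pointer back to the start
    print(src.readlines())           # whole file as a list of lines (after seek(0))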
|
[
"'''\npython open() 函数用于打开一个文件,创建一个 file 对象,相关的方法才可以调用它进行读写。\n更多文件操作可参考:Python 文件I/O。\n函数语法\nopen(name[, mode[, buffering]])\n参数说明:\nname : 一个包含了你要访问的文件名称的字符串值。\nmode : mode 决定了打开文件的模式:只读,写入,追加等。所有可取值见如下的完全列表。这个参数是非强制的,默认文件访问模式为只读(r)。\nbuffering : 如果 buffering 的值被设为 0,就不会有寄存。如果 buffering 的值取 1,访问文件时会寄存行。如果将 buffering 的值设为大于 1 的整数,表明了这就是的寄存区的缓冲大小。如果取负值,寄存区的缓冲大小则为系统默认。\n不同模式打开文件的完全列表:\n模式\n描述\nr\n以只读方式打开文件。文件的指针将会放在文件的开头。这是默认模式。\nrb\n以二进制格式打开一个文件用于只读。文件指针将会放在文件的开头。这是默认模式。\nr+\n打开一个文件用于读写。文件指针将会放在文件的开头。\nrb+\n以二进制格式打开一个文件用于读写。文件指针将会放在文件的开头。\nw\n打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb\n以二进制格式打开一个文件只用于写入。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nw+\n打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\nwb+\n以二进制格式打开一个文件用于读写。如果该文件已存在则打开文件,并从开头开始编辑,即原有内容会被删除。如果该文件不存在,创建新文件。\na\n打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\nab\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。也就是说,新的内容将会被写入到已有内容之后。如果该文件不存在,创建新文件进行写入。\na+\n打开一个文件用于读写。如果该文件已存在,文件指针将会放在文件的结尾。文件打开时会是追加模式。如果该文件不存在,创建新文件用于读写。\nab+\n以二进制格式打开一个文件用于追加。如果该文件已存在,文件指针将会放在文件的结尾。如果该文件不存在,创建新文件用于读写。\nfile 对象方法\nfile.read([size]):size 未指定则返回整个文件,如果文件大小 >2 倍内存则有问题,f.read()读到文件尾时返回\"\"(空字串)。\nfile.readline():返回一行。\nfile.readlines([size]) :返回包含size行的列表, size 未指定则返回全部行。\nfor line in f: print line :通过迭代器访问。\nf.write(\"hello\\n\"):如果要写入字符串以外的数据,先将他转换为字符串。\nf.tell():返回一个整数,表示当前文件指针的位置(就是到文件头的比特数)。\nf.seek(偏移量,[起始位置]):用来移动文件指针。\n偏移量: 单位为比特,可正可负\n起始位置: 0 - 文件头, 默认值; 1 - 当前位置; 2 - 文件尾\nf.close() 关闭文件\n\n\nopen(filename [, mode [, bufsize]])\n打开一个文件,返回一个file对象。 如果文件无法打开,将处罚IOError异常。\n应该使用open()来代替直接使用file类型的构造函数打开文件。\n参数filename表示将要被打开的文件的路径字符串;\n参数mode表示打开的模式,最常用的模式有:'r'表示读文本,'w'表示写文本文件,'a'表示在文件中追加。\nMode的默认值是'r'。\n当操作的是二进制文件时,只要在模式值上添加'b'。这样提高了程序的可移植性。\n可选参数bufsize定义了文件缓冲区的大小。0表示不缓冲;1表示行缓冲;任何其他正数表示使用该大小的缓冲区;\n负数表示使用系统默认缓冲区大小,对于tty设备它往往是行缓冲,而对于其他文件往往完全缓冲。如果参数值被省却。\n使用系统默认值。\n'''\n\nf=open('1.txt','r',encoding='utf-8')\nprint(f.read())\n'''\n输出...\nltf\nzhongguo\nshanxi\nyuncheng\n男\n20\n'''\n\n#参考博客 https://www.cnblogs.com/Devilf/p/8006663.html\n\n",
"<docstring token>\nf = open('1.txt', 'r', encoding='utf-8')\nprint(f.read())\n<docstring token>\n",
"<docstring token>\n<assignment token>\nprint(f.read())\n<docstring token>\n",
"<docstring token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
597 |
c0348fc5f51e6f7a191fea6d0e3cb84c60b03e22
|
'''
Problem
Two numbers a and b are given. Print the hypotenuse of the right triangle with the given legs.
'''
import math
a = int(input())
b = int(input())
print(math.sqrt(a * a + b * b))
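# Side note (added, not from the original solution): math.hypot computes the same
# value, sqrt(a*a + b*b), in a single call:
# print(math.hypot(a, b))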
|
[
"'''\nУсловие\nДано два числа a и b. Выведите гипотенузу треугольника с заданными катетами.\n'''\nimport math\na = int(input())\nb = int(input())\nprint(math.sqrt(a * a + b * b))"
] | true |
598 |
8ac84aa29e9e4f3b85f1b3c27819feb5f41e8d8e
|
import os
import factorStatFileCreator
dirName = 'NoPerms/'
dirName2 = 'AllPerms/'
freqAgentDic = dict()
lenAgentDic = dict()
contAgentDic = dict()
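# Descriptive note (added): this module compares per-user-agent traffic statistics
# between the 'NoPerms/' and 'AllPerms/' capture directories, using helpers from
# factorStatFileCreator. The three dictionaries above accumulate, per user agent,
# average request frequency (time between requests), average request length in
# bytes, and request counts; the print*Diff functions turn each pair of values
# into a single ratio score.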
def freqModAvgFunc(dirName):
fullList = factorStatFileCreator.directoryFreq(dirName)
UA = dirName.split("/")[1]
avgList = []
sum = 0
i = 0
while i <= len(fullList) - 2:
diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i+1])
if diff == None:
i+=1
else:
avgList.append(int(diff))
i+=1
for item in avgList:
sum += item
if len(avgList) != 0:
if UA not in freqAgentDic.keys():
freqAgentDic[UA] = [sum/len(avgList)]
else:
agentList = freqAgentDic[UA]
agentList.append(sum/len(avgList))
freqAgentDic[UA] = agentList
def finalFreqFunc(dirName):
for filename in os.listdir(dirName):
file = dirName + filename
freqModAvgFunc(file)
def printFreqDiff():
finalFreqFunc(dirName)
finalFreqFunc(dirName2)
#print (freqAgentDic)
for keys, vals in freqAgentDic.items():
if len(vals) > 1 and vals[1] > 0:
score = vals[0] / vals[1]
print ("{:<15}: {:.2f}".format(keys,score))
else:
score = "N/A"
print ("{:<15}: {}".format(keys,score))
freqAgentDic[keys] = score
return (freqAgentDic)
def avgModFunc(directory):
sum = 0
UA = directory.split("/")[1]
byteList = factorStatFileCreator.directoryLen(directory)
for item in byteList:
sum += item
if len(byteList) != 0:
if UA not in lenAgentDic.keys():
lenAgentDic[UA] = [sum/len(byteList)]
else:
agentList = lenAgentDic[UA]
agentList.append(sum/len(byteList))
lenAgentDic[UA] = agentList
def finalLenFunc(dirName):
for filename in os.listdir(dirName):
file = dirName + filename
avgModFunc(file)
def printLenDiff():
finalLenFunc(dirName)
finalLenFunc(dirName2)
for keys, vals in lenAgentDic.items():
if len(vals) > 1 and vals[1] > 0:
score = vals[1] / vals[0]
print ("{:<15}: {:.2f}".format(keys,score))
else:
score = "N/A"
print ("{:<15}: {}".format(keys,score))
lenAgentDic[keys] = score
return lenAgentDic
def directoryModCont(directory):
contentSet = set()
newSet = set()
listHolder = []
numofReq = 0
UA = directory.split("/")[1]
for filename in os.listdir(directory):
file = directory + '/' + filename
listHolder = factorStatFileCreator.contentCommand(file)
#print(newSet)
newSet = listHolder[0]
numofReq += len(listHolder[1])
contentSet = contentSet|newSet
newSet = set()
if UA not in contAgentDic.keys():
contAgentDic[UA] = [numofReq]
else:
agentList = contAgentDic[UA]
agentList.append(numofReq)
contAgentDic[UA] = agentList
return contentSet, numofReq
def finalContFunc(dirName):
for filename in os.listdir(dirName):
file = dirName + filename
directoryModCont(file)
def printContDiff():
finalContFunc(dirName)
finalContFunc(dirName2)
for keys, vals in contAgentDic.items():
if len(vals) > 1 and vals[1] > 0:
score = vals[0] / vals[1]
print ("{:<15}: {:.2f}".format(keys,score))
else:
score = "N/A"
print ("{:<15}: {}".format(keys,score))
contAgentDic[keys] = score
return contAgentDic
|
[
"import os\nimport factorStatFileCreator\n\ndirName = 'NoPerms/'\ndirName2 = 'AllPerms/'\n\nfreqAgentDic = dict()\nlenAgentDic = dict()\ncontAgentDic = dict()\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split(\"/\")[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i+1])\n if diff == None:\n i+=1\n else:\n avgList.append(int(diff))\n i+=1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum/len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum/len(avgList))\n freqAgentDic[UA] = agentList\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n #print (freqAgentDic)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n freqAgentDic[keys] = score\n return (freqAgentDic)\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split(\"/\")[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum/len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum/len(byteList))\n lenAgentDic[UA] = agentList\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split(\"/\")[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n #print(newSet)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet|newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print (\"{:<15}: {:.2f}\".format(keys,score))\n else:\n score = \"N/A\"\n print (\"{:<15}: {}\".format(keys,score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"import os\nimport factorStatFileCreator\ndirName = 'NoPerms/'\ndirName2 = 'AllPerms/'\nfreqAgentDic = dict()\nlenAgentDic = dict()\ncontAgentDic = dict()\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\ndirName = 'NoPerms/'\ndirName2 = 'AllPerms/'\nfreqAgentDic = dict()\nlenAgentDic = dict()\ncontAgentDic = dict()\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\ndef finalFreqFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n freqModAvgFunc(file)\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\ndef finalLenFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n avgModFunc(file)\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\ndef finalContFunc(dirName):\n for filename in os.listdir(dirName):\n file = dirName + filename\n directoryModCont(file)\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printFreqDiff():\n finalFreqFunc(dirName)\n finalFreqFunc(dirName2)\n for keys, vals in freqAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n freqAgentDic[keys] = score\n return freqAgentDic\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\n<function token>\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n<function token>\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\n<function token>\n\n\ndef printContDiff():\n finalContFunc(dirName)\n finalContFunc(dirName2)\n for keys, vals in contAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[0] / vals[1]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n contAgentDic[keys] = score\n return contAgentDic\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n<function token>\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\ndef directoryModCont(directory):\n contentSet = set()\n newSet = set()\n listHolder = []\n numofReq = 0\n UA = directory.split('/')[1]\n for filename in os.listdir(directory):\n file = directory + '/' + filename\n listHolder = factorStatFileCreator.contentCommand(file)\n newSet = listHolder[0]\n numofReq += len(listHolder[1])\n contentSet = contentSet | newSet\n newSet = set()\n if UA not in contAgentDic.keys():\n contAgentDic[UA] = [numofReq]\n else:\n agentList = contAgentDic[UA]\n agentList.append(numofReq)\n contAgentDic[UA] = agentList\n return contentSet, numofReq\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n\n\ndef freqModAvgFunc(dirName):\n fullList = factorStatFileCreator.directoryFreq(dirName)\n UA = dirName.split('/')[1]\n avgList = []\n sum = 0\n i = 0\n while i <= len(fullList) - 2:\n diff = factorStatFileCreator.diffFunc(fullList[i], fullList[i + 1])\n if diff == None:\n i += 1\n else:\n avgList.append(int(diff))\n i += 1\n for item in avgList:\n sum += item\n if len(avgList) != 0:\n if UA not in freqAgentDic.keys():\n freqAgentDic[UA] = [sum / len(avgList)]\n else:\n agentList = freqAgentDic[UA]\n agentList.append(sum / len(avgList))\n freqAgentDic[UA] = agentList\n\n\n<function token>\n<function token>\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n\n\ndef printLenDiff():\n finalLenFunc(dirName)\n finalLenFunc(dirName2)\n for keys, vals in lenAgentDic.items():\n if len(vals) > 1 and vals[1] > 0:\n score = vals[1] / vals[0]\n print('{:<15}: {:.2f}'.format(keys, score))\n else:\n score = 'N/A'\n print('{:<15}: {}'.format(keys, score))\n lenAgentDic[keys] = score\n return lenAgentDic\n\n\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n\n\ndef avgModFunc(directory):\n sum = 0\n UA = directory.split('/')[1]\n byteList = factorStatFileCreator.directoryLen(directory)\n for item in byteList:\n sum += item\n if len(byteList) != 0:\n if UA not in lenAgentDic.keys():\n lenAgentDic[UA] = [sum / len(byteList)]\n else:\n agentList = lenAgentDic[UA]\n agentList.append(sum / len(byteList))\n lenAgentDic[UA] = agentList\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
599 |
f733885eed5d1cbf6e49db0997655ad627c9d795
|
from django import template
register = template.Library()
@register.filter(name='range')
def filter_range(start, end=None):
if end is None:
return range(start)
else:
return range(start, end)
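# Usage sketch (added; assumes this module lives in an app's templatetags package
# under a name such as range_filters -- the real module name is not shown here):
# {% load range_filters %}
# {% for i in 5|range %}{{ i }}{% endfor %}    -> 01234
# {% for i in 2|range:5 %}{{ i }}{% endfor %}  -> 234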
|
[
"from django import template\n\nregister = template.Library()\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"from django import template\nregister = template.Library()\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"<import token>\nregister = template.Library()\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"<import token>\n<assignment token>\n\n\[email protected](name='range')\ndef filter_range(start, end=None):\n if end is None:\n return range(start)\n else:\n return range(start, end)\n",
"<import token>\n<assignment token>\n<function token>\n"
] | false |